diff --git a/spaces/0x7194633/nllb-1.3B-demo/app.py b/spaces/0x7194633/nllb-1.3B-demo/app.py
deleted file mode 100644
index 4c02bfee8da8a700e3b67989e821911ffecf48f4..0000000000000000000000000000000000000000
--- a/spaces/0x7194633/nllb-1.3B-demo/app.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import os
-import torch
-import gradio as gr
-import time
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
-from flores200_codes import flores_codes
-
-
-def load_models():
-    # build model and tokenizer
-    model_name_dict = {'nllb-distilled-1.3B': 'facebook/nllb-200-distilled-1.3B'}
-
-    model_dict = {}
-
-    for call_name, real_name in model_name_dict.items():
-        print('\tLoading model: %s' % call_name)
-        model = AutoModelForSeq2SeqLM.from_pretrained(real_name)
-        tokenizer = AutoTokenizer.from_pretrained(real_name)
-        model_dict[call_name+'_model'] = model
-        model_dict[call_name+'_tokenizer'] = tokenizer
-
-    return model_dict
-
-
-def translation(source, target, text):
-    if len(model_dict) == 2:
-        model_name = 'nllb-distilled-1.3B'
-
-    start_time = time.time()
-    source = flores_codes[source]
-    target = flores_codes[target]
-
-    model = model_dict[model_name + '_model']
-    tokenizer = model_dict[model_name + '_tokenizer']
-
-    translator = pipeline('translation', model=model, tokenizer=tokenizer, src_lang=source, tgt_lang=target)
-    output = translator(text, max_length=400)
-
-    end_time = time.time()
-
-    output = output[0]['translation_text']
-    result = {'inference_time': end_time - start_time,
-              'source': source,
-              'target': target,
-              'result': output}
-    return result
-
-
-if __name__ == '__main__':
-    print('\tinit models')
-
-    global model_dict
-
-    model_dict = load_models()
-
-    # define gradio demo
-    lang_codes = list(flores_codes.keys())
-    #inputs = [gr.inputs.Radio(['nllb-distilled-600M', 'nllb-1.3B', 'nllb-distilled-1.3B'], label='NLLB Model'),
-    inputs = [gr.inputs.Dropdown(lang_codes, default='English', label='Source'),
-              gr.inputs.Dropdown(lang_codes, default='Korean', label='Target'),
-              gr.inputs.Textbox(lines=5, label="Input text"),
-              ]
-
-    outputs = gr.outputs.JSON()
-
-    title = "NLLB distilled 1.3B demo"
-
-    demo_status = "Demo is running on CPU"
-    description = f"Details: https://github.com/facebookresearch/fairseq/tree/nllb. {demo_status}"
-    examples = [
-        ['English', 'Korean', 'Hi. nice to meet you']
-    ]
-
-    gr.Interface(translation,
-                 inputs,
-                 outputs,
-                 title=title,
-                 description=description,
-                 examples=examples,
-                 examples_per_page=50,
-                 ).launch()
-
-
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Apocalypto Hollywood Movie Hindi Dubbing Hd Mp4 238 Watch Online or Download Now.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Apocalypto Hollywood Movie Hindi Dubbing Hd Mp4 238 Watch Online or Download Now.md
deleted file mode 100644
index 4649f98816a62ed25d694f63a3d26c297717f542..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Apocalypto Hollywood Movie Hindi Dubbing Hd Mp4 238 Watch Online or Download Now.md
+++ /dev/null
@@ -1,193 +0,0 @@
-

Apocalypto: A Thrilling Adventure Movie Set in Ancient Maya

-

If you are looking for a movie that will take you on a wild ride through a fascinating historical period with breathtaking action scenes and captivating characters, then you should watch Apocalypto. This movie is a historical fiction action-adventure drama film directed by Mel Gibson and released in 2006. It tells the story of a young Maya man who escapes from being sacrificed by a ruthless enemy tribe and tries to save his family and his people from destruction. In this article, we will explore everything you need to know about this movie, including its plot, genre, director, cast, release date, critical reception, Hindi dubbing, and HD Mp4 238 format.

-

Apocalypto Hollywood Movie Hindi Dubbing Hd Mp4 238


DOWNLOAD === https://byltly.com/2uKy1i



-

The Plot of Apocalypto

-

The movie is divided into three acts:

-
    -
  1. The Raid
  2. -

    In this act, we are introduced to Jaguar Paw (Rudy Youngblood), a hunter from a peaceful Maya village in the rainforest. He lives with his pregnant wife Seven (Dalia Hernandez) and his young son Turtles Run (Carlos Emilio Baez). One day, his village is attacked by a group of warriors led by Zero Wolf (Raoul Trujillo), who are looking for captives to sacrifice to their gods. Jaguar Paw hides his wife and son in a deep pit before being captured along with many others.

    -
  3. The Escape
  4. -

    In this act, we follow Jaguar Paw's journey as he is taken to a nearby city where he witnesses the horrors of human sacrifice, slavery, and disease. He also meets a young girl (María Isabel Díaz) who prophesies that the end of their world is near. Jaguar Paw is chosen as one of the victims to be sacrificed on top of a pyramid, but before his heart can be ripped out, a solar eclipse occurs, which is interpreted as a sign from the gods. The high priest (Fernando Hernandez) decides to spare the remaining captives and orders them to be killed by Zero Wolf's men in a game where they have to run through a field while being shot at by arrows. Jaguar Paw manages to escape and kills one of Zero Wolf's sons in retaliation. This sparks a relentless chase through the jungle as Jaguar Paw tries to outrun his pursuers and reach his wife and son.

    -
  5. The Eclipse
  6. -

    In this act, we witness Jaguar Paw's survival skills and courage as he faces various obstacles and enemies along his way. He also encounters some friendly animals and plants that help him heal his wounds and find his way back home. He finally arrives at his village and rescues his wife and son from the pit, which is flooded by rainwater. He then confronts Zero Wolf and kills him in a brutal fight. He also sees some Spanish ships arriving on the coast, which signals the arrival of a new era. He decides to leave his village and take his family deeper into the forest, where they can start anew.

    -
-

The Genre of Apocalypto

-

Historical Fiction

-

The movie is set in ancient Maya civilization, which flourished in Mesoamerica from around 2000 BC to 1697 AD. The movie depicts various aspects of Maya culture, such as their religion, architecture, art, writing, mathematics, astronomy, calendar, and warfare. However, the movie also takes some creative liberties with historical facts and adds some fictional elements to create a more dramatic and engaging story. For example,

-


- -

Therefore, the movie should not be taken as an accurate representation of Maya history, but rather as an artistic interpretation that uses history as a backdrop for an exciting adventure story.

-

Action-Adventure

-

The movie is also an action-adventure film that delivers thrilling action sequences and suspenseful chases throughout its runtime. The movie showcases various types of action scenes, such as:

- -

The movie also uses minimal dialogue and relies mostly on visual storytelling and sound effects to create tension and emotion. The movie has been praised for its realistic and visceral depiction of violence and gore, as well as its stunning cinematography and editing that capture the beauty and danger of the natural environment.

-

Drama

-

The movie is not only an action-packed spectacle, but also a drama that explores themes such as survival, family, courage, sacrifice, and faith through its characters and their struggles. The movie portrays the contrast between the peaceful and harmonious life of Jaguar Paw's village and the cruel and chaotic life of Zero Wolf's city. The movie also shows how Jaguar Paw's love for his wife and son motivates him to overcome all odds and challenges. The movie also raises questions about the meaning and purpose of life, the role of fate and destiny, the value of culture and tradition, and the impact of change and progress on human societies. The movie has been criticized for its negative and stereotypical portrayal of indigenous people as savage and barbaric, as well as its implicit endorsement of colonialism and Christianity.

-

The Director of Apocalypto

-

The movie was directed by Mel Gibson, who is also known for his roles in movies such as Braveheart, Lethal Weapon, Mad Max, The Passion of the Christ, and Hacksaw Ridge. Gibson is an Australian-American actor, filmmaker, and producer who has won several awards and accolades for his work. He is also known for his controversial views and statements on politics, religion, race, gender, and sexuality. He has been accused of anti-Semitism, homophobia, misogyny, racism, domestic violence, and alcoholism. He has also faced legal troubles and public backlash for his actions and behavior. Gibson has said that he was inspired to make Apocalypto after reading about the decline and collapse of ancient civilizations. He wanted to make a movie that would show the universal human themes and emotions that transcend time and place. He also wanted to make a movie that would challenge himself and his audience with a different language, culture, and style.

-

The Cast of Apocalypto

-

The movie features a cast of mostly unknown actors who are native speakers of Yucatec Maya, the language used in the movie. The main actors are:

| Actor | Role | Background |
| --- | --- | --- |
| Rudy Youngblood | Jaguar Paw | An American actor, dancer, and musician who is of Comanche, Cree, and Yaqui descent. He was born in Texas and grew up in Montana. He has performed in various Native American cultural events and ceremonies. He was 25 years old when he auditioned for Apocalypto. |
| Dalia Hernandez | Seven | A Mexican actress who was born in Veracruz. She was 19 years old when she auditioned for Apocalypto. She had no previous acting experience but had studied dance since she was a child. She has also appeared in other movies such as Miracle Underground and Die Legende der Maske. |
| Raoul Trujillo | Zero Wolf | A Canadian actor, dancer, choreographer, and director who is of Apache, Ute, Comanche, Pueblo, Tlascalan, and French Canadian descent. He was born in New Mexico and grew up in Colorado. He has performed with various dance companies around the world. He has also appeared in other movies such as Riddick, Sicario: Day of the Soldado, Blood Quantum, and The New World. |
| María Isabel Díaz | The Girl | Has also appeared in other movies such as The Girl with the Dragon Tattoo and The Queen of Spain. |
| Fernando Hernandez | The High Priest | A Mexican actor who was born in Mexico City. He studied theater at the National Autonomous University of Mexico. He has appeared in other movies such as The Crime of Father Amaro, The Legend of Zorro, and The Mexican. |
| Carlos Emilio Baez | Turtles Run | A Mexican child actor who was born in Veracruz. He was 7 years old when he auditioned for Apocalypto. He had no previous acting experience but had a natural talent and charisma. He has also appeared in other movies such as La Misma Luna and Sin Nombre. |
-

The Release Date of Apocalypto

-

The movie was released on December 8, 2006 in the United States and Canada, and on various dates in other countries throughout 2006 and 2007. The movie had a production budget of $40 million and a marketing budget of $15 million. The movie grossed $120.7 million worldwide, making it a moderate box office success. The movie was rated R for sequences of graphic violence and disturbing images. The movie had a runtime of 139 minutes.

-

The Critical Reception of Apocalypto

-

Positive Reviews

-

The movie received mostly positive reviews from critics and audiences who praised the movie's cinematography, direction, action, and authenticity. Some examples of positive reviews are:

- -

Negative Reviews

-

The movie also received some negative reviews from critics and audiences who criticized the movie's violence, historical accuracy, portrayal of indigenous people, and message. Some examples of negative reviews are:

- -

Awards and Nominations

-

The movie received or was considered for several awards and nominations in various categories and ceremonies. Some of them are:

- - - - - - - - - - - - - - - - - -
| Award/Nomination | Category | Result |
| --- | --- | --- |
| Academy Awards | Best Makeup | Nominated |
| | Best Sound Editing | Nominated |
| | Best Sound Mixing | Nominated |
| Golden Globe Awards | Best Foreign Language Film | Nominated |
| BAFTA Awards | Best Film Not in the English Language | Nominated |
| | Best Makeup & Hair | Nominated |
| | Best Action Movie | Nominated |
| Satellite Awards | Best Foreign Language Film | Nominated |
| | Best Cinematography | Nominated |
| | Best Sound | Nominated |
| MTV Movie Awards | Best Fight (Jaguar Paw vs. Zero Wolf) | Nominated |
| Teen Choice Awards | Choice Movie: Action Adventure | Nominated |
| National Board of Review | Top Ten Films of 2006 | Won |
| American Film Institute | AFI Awards 2006: Official Selections | Won |
-

The Hindi Dubbing of Apocalypto

-

The movie was dubbed in Hindi for Indian audiences who prefer to watch movies in their native language. The Hindi dubbing was done by a professional studio that hired voice actors who matched the original actors' voices and expressions. The Hindi dubbing also translated the Yucatec Maya dialogue into Hindi while retaining the meaning and tone of the original script. The Hindi dubbing was released in India along with the original version in select theaters and on DVD and online platforms. The Hindi dubbing received mixed reviews from Indian critics and audiences who appreciated the effort but also felt that some of the cultural and historical nuances were lost in translation.

-

The HD Mp4 238 Format of Apocalypto

-

Definition

-

The HD Mp4 238 format is a video format that refers to the quality, resolution, compression, and compatibility of the video file. The HD Mp4 238 format has the following characteristics:

- -

Advantages of HD Mp4 238 Format

-

Some of the advantages of watching Apocalypto in HD Mp4 238 format are:

- -

Disadvantages of HD Mp4 238 Format

-

Some of the disadvantages of watching Apocalypto in HD Mp4 238 format are:

- -

Conclusion

-

    In conclusion, Apocalypto is a movie that will take you on a thrilling adventure through a fascinating historical period with breathtaking action scenes and captivating characters. It is a historical fiction action-adventure drama film directed by Mel Gibson and released in 2006, telling the story of a young Maya man who escapes from being sacrificed by a ruthless enemy tribe and tries to save his family and his people from destruction. The movie blends historical facts with fictional elements to create a realistic and immersive setting, delivers thrilling action sequences and suspenseful chases throughout its runtime, and explores themes such as survival, family, courage, sacrifice, and faith through its characters and their struggles. It was directed by Mel Gibson, who is also known for his roles in movies such as Braveheart, Lethal Weapon, Mad Max, The Passion of the Christ, and Hacksaw Ridge, and it features a cast of mostly unknown actors who are native speakers of Yucatec Maya, the language used in the movie. The movie was released on December 8, 2006 in the United States and Canada, and on various dates in other countries throughout 2006 and 2007. It received mostly positive reviews from critics and audiences who praised its cinematography, direction, action, and authenticity, along with some negative reviews that criticized its violence, historical accuracy, portrayal of indigenous people, and message, and it received or was considered for several awards and nominations in various categories and ceremonies. The movie was dubbed in Hindi for Indian audiences who prefer to watch movies in their native language, and it is also available in HD Mp4 238 format, a video format that refers to the quality, resolution, compression, and compatibility of the video file. If you are interested in watching Apocalypto, we recommend watching it in Hindi dubbing HD Mp4 238 format, as it will give you the best experience of this amazing movie. You can find it in this format on various online platforms or offline sources, such as websites, apps, DVDs, or USB drives, or watch it in its original version or other languages and formats if you prefer. We hope you enjoyed this article and learned something new about Apocalypto, and we hope you will watch it and share your thoughts and opinions with us.

FAQs

-

Here are some frequently asked questions about Apocalypto:

-
    -
  1. What does Apocalypto mean?
  2. -

    Apocalypto is a Greek word that means "unveiling" or "revelation". It is also the title of the last book of the New Testament, also known as Revelation. The title of the movie refers to the end of an era or a world, as well as the beginning of a new one.

    -
  3. Is Apocalypto based on a true story?
  4. -

    No, Apocalypto is not based on a true story. It is a fictional story that uses historical facts and elements as a backdrop. The movie does not specify when or where exactly it takes place, but it is generally assumed to be set in the late Postclassic period (1200-1521 AD) of Maya civilization in the Yucatan Peninsula.

    -
  5. How accurate is Apocalypto?
  6. -

    Apocalypto is not very accurate in terms of historical and cultural details. The movie takes some creative liberties and adds some fictional elements to create a more dramatic and engaging story. Some of the inaccuracies are:

    - -

    Therefore, Apocalypto should not be taken as an accurate representation of Maya history and culture, but rather as an artistic interpretation that uses history as a backdrop for an exciting adventure story.

    -
  7. Who are the actors in Apocalypto?
  8. -

    The actors in Apocalypto are mostly unknown actors who are native speakers of Yucatec Maya, the language used in the movie. The main actors are Rudy Youngblood as Jaguar Paw, Dalia Hernandez as Seven, Raoul Trujillo as Zero Wolf, Maria Isabel Diaz as The Girl, Fernando Hernandez as The High Priest, and Carlos Emilio Baez as Turtles Run. The movie also features some non-Maya actors who play minor roles or extras.

    -
  9. Where can I watch Apocalypto?
  10. -Serbian, Croatian, Bulgarian, Greek, Albanian, Swedish, Norwegian, Danish, Finnish, Dutch, and Yucatec Maya. You can also watch Apocalypto in different video formats, such as HD Mp4 238, HD Mp4 360, HD Mp4 480, HD Mp4 720, HD Mp4 1080, HD Mp4 4K, AVI, MKV, MOV, WMV, FLV, and 3GP.

    -
  11. Why should I watch Apocalypto?
  12. -

    You should watch Apocalypto because it is a movie that will take you on a thrilling adventure through a fascinating historical period with breathtaking action scenes and captivating characters. You should watch Apocalypto because it is a movie that will show you a different and unique perspective of Maya history and culture. You should watch Apocalypto because it is a movie that will challenge you and inspire you with its themes and messages. You should watch Apocalypto because it is a movie that will entertain you and amaze you with its cinematography and direction. You should watch Apocalypto because it is a movie that will make you feel and think with its drama and emotion. You should watch Apocalypto because it is a movie that you will never forget.

    -

    -
    -
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download MPLAB XC32 Keygen and Enjoy the Full Potential of the Compiler.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download MPLAB XC32 Keygen and Enjoy the Full Potential of the Compiler.md
deleted file mode 100644
index a61bdb2e1f4fa4a36e30609a83f0ebf332db2348..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download MPLAB XC32 Keygen and Enjoy the Full Potential of the Compiler.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
    -

    MPLAB XC32 Keygen Download: How to Install and Use the Compiler for PIC32 Microcontrollers

    -

    If you are working on a project that involves PIC32 microcontrollers, you might want to use MPLAB XC32 compiler. This is a comprehensive solution for your software development that offers many features and benefits. However, you might also need a keygen to activate the compiler and unlock its full potential. In this article, we will explain what MPLAB XC32 compiler is, what a keygen is, how to download and install it, and how to use it for your PIC32 development.

    -

    Introduction

    -

    MPLAB XC32 compiler is a C/C++ compiler that supports all PIC32 microcontrollers from Microchip Technology. It is part of the MPLAB X Integrated Development Environment (IDE), which provides a complete toolchain for developing, testing and debugging embedded applications. MPLAB XC32 compiler offers many advantages for PIC32 development, such as:

    -

    mplab xc32 keygen download


    DOWNLOAD ►►► https://byltly.com/2uKxEV



    - -

    However, MPLAB XC32 compiler is not free. You need to purchase a license to use it without any limitations. The license can be either standard or pro, depending on the level of optimization and features you need. The standard license costs $495 per seat, while the pro license costs $995 per seat.

    -

    This is where a keygen comes in handy. A keygen is a program that generates a valid license file for a software product. By using a keygen, you can bypass the need to pay for the license and use the software for free. A keygen can also help you avoid any expiration or activation issues that might occur with a purchased license.

    -

    However, using a keygen also has some drawbacks. First of all, it is illegal and unethical. You are violating the terms and conditions of the software vendor and depriving them of their rightful income. Secondly, it is risky. You might download a fake or malicious keygen that can harm your computer or steal your personal information. Thirdly, it is unreliable. You might not get the latest updates or support from the software vendor or face compatibility problems with other tools or devices.

    -

    How to Download MPLAB XC32 Keygen

    -

    If you still want to use a keygen for MPLAB XC32 compiler, you need to be careful and follow some steps. Here are some tips on how to download MPLAB XC32 keygen safely and securely.

    -
      -
    1. Find a reliable source for the keygen. You can search online for websites or forums that offer keygens for various software products. However, you need to be wary of fake or malicious links that might redirect you to unwanted or harmful sites. You can also check the reviews or comments from other users who have downloaded the keygen before.
    2. -
    3. Verify the authenticity and safety of the keygen. Before you download the keygen file, you should scan it with an antivirus or anti-malware program. You can also use online tools such as VirusTotal or Jotti's Malware Scan to check if the file contains any viruses or malware. You should also check the file size and format of the keygen file. A typical keygen file should be less than 10 MB in size and have an .exe extension.
    4. -
    5. Download and extract the keygen file. Once you are sure that the keygen file is safe and genuine, you can download it to your computer. You might need to enter a password or complete a captcha verification before downloading. After downloading, you should extract the keygen file from its compressed folder using a program such as WinRAR or 7-Zip.
    6. -
    -

    How to Install MPLAB XC32 Keygen

    -

    After downloading and extracting the keygen file, you need to install it on your computer. Here are some steps on how to install MPLAB XC32 keygen correctly.

    -
      -
    1. Run the keygen program and generate a license file. Double-click on the keygen file to launch it. You might see a warning message from your antivirus or firewall program asking you to allow or block the program. You should allow it if you trust it. The keygen program will open in a new window with some options and buttons. You should select your product (MPLAB XC32) and your license type (standard or pro) from the drop-down menus. Then click on Generate button to create a license file.
    2. -
    3. Copy the license file to the correct folder. The license file will have an .lic extension and will be saved in the same folder as the keygen file by default. You need to copy this file to another folder where MPLAB X IDE can find it. The folder location depends on your operating system and version of MPLAB X IDE. For example, if you are using Windows 10 and MPLAB X IDE v5.50, you should copy the license file to C:\ProgramData\Microchip\MPLABX\v5.xx\licenses folder.
    4. -
    5. Activate the license in MPLAB X IDE. Open MPLAB X IDE on your computer and go to Tools > License Manager menu option. You will see a window with your available licenses for different products. You should see your newly generated license for MPLAB XC32 compiler under Available Licenses tab with an Active status.
    6. -
    -

    How to Use MPLAB XC32 Compiler

    -

    Now that you have installed and activated your license for MPLAB XC32 compiler, you can start using it for your PIC32 development projects. Here are some steps on how to use MPLAB XC32 compiler effectively.

    -
      -
    1. Create a new project for PIC32 microcontroller. In MPLAB X IDE, go to the File > New Project menu option. You will see a New Project wizard that will guide you through the steps of creating a new project. You need to select Microchip Embedded as the category and Standalone Project as the project type. Then click Next.
    3. -
    4. Select your device and tool. In the next step, you need to choose your target device and your programming/debugging tool. You can use the search box or the filters to find your device by name, family or package. For example, if you are using PIC32MX250F128B microcontroller, you can type its name in the search box and select it from the list. Then you need to select your tool from the available options. For example, if you are using PICkit 4 In-Circuit Debugger/Programmer, you can select it from the list. Then click Next.
    5. -
    6. Select your compiler. In the next step, you need to choose your compiler from the available options. You should see MPLAB XC32 C/C++ Compiler as one of the options. Select it and click Next.
    7. -
    8. Give a name and location for your project. In the next step, you need to enter a name and a location for your project. You can also choose a folder for your project or create a new one. For example, you can name your project PIC32_Blink_LED and save it in C:\Users\YourName\Documents\MPLABXProjects folder. Then click Finish.
    9. -
    10. Configure your project settings and options. After creating your project, you will see it in the Projects window on the left side of MPLAB X IDE. You can right-click on your project name and select Properties to open a dialog box where you can configure various settings and options for your project, such as device configuration bits, compiler optimization level, linker script, include directories, libraries and more. You can also use MPLAB Code Configurator (MCC) to configure your peripherals and libraries graphically.
    11. -
    -

    How to Write, Build and Debug Code Using MPLAB XC32 Compiler

    -

    After creating and configuring your project, you can start writing your code using MPLAB XC32 compiler. Here are some steps on how to write, build and debug code using MPLAB XC32 compiler.

    -

    -
      -
    1. Write your code in the main.c file. You will see a main.c file under the Source Files folder in your project window. This is where you write your main program code in C or C++. You can use the editor window on the right side of MPLAB X IDE to write or edit your code, and its code completion, syntax highlighting, code folding and other editor features help you write code faster and more easily. A minimal example of what this file might contain is sketched just after this list.
    2. -
    3. Build your project. After writing your code, you need to build your project to compile and link your code into an executable file that can run on your target device. You can build your project by clicking on the hammer icon on the toolbar or by pressing F11 key on your keyboard. You will see the output of the build process in the Output window at the bottom of MPLAB X IDE. If there are any errors or warnings in your code, you will see them highlighted in red or yellow in the editor window and listed in the Output window.
    4. -
    5. Debug your project. After building your project successfully, you need to debug your project to test and verify its functionality on your target device. You can debug your project by clicking on the bug icon on the toolbar or by pressing F5 key on your keyboard. You will see the Debug window at the bottom of MPLAB X IDE where you can control the execution of your program using buttons such as Run, Pause, Step Over, Step Into and Step Out. You can also set breakpoints, watch variables, view registers, memory and stack using various windows in MPLAB X IDE.
    6. -
    -
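
To make the main.c step above concrete, here is a minimal sketch of what the PIC32_Blink_LED project's main.c could look like when built with MPLAB XC32. It is only an illustration and is not taken from the article: the configuration-bit values, the choice of pin RB0, and the crude busy-wait delay are assumptions you would adapt to your own board and device.

```c
/* Minimal LED-blink sketch for a PIC32MX target compiled with MPLAB XC32.
   Assumptions (adjust for your hardware): the LED is on pin RB0, the device
   has an ANSELB register (PIC32MX1xx/2xx parts such as the PIC32MX250F128B),
   and the config bits below suit your oscillator setup. */
#include <xc.h>

#pragma config FNOSC  = FRCPLL   /* internal fast RC oscillator with PLL (example value) */
#pragma config FWDTEN = OFF      /* watchdog timer disabled */
#pragma config JTAGEN = OFF      /* JTAG off so the port pins are free for I/O */

/* Crude software delay; a real project would use a timer peripheral instead. */
static void delay(volatile unsigned int count)
{
    while (count--)
    {
        /* busy-wait */
    }
}

int main(void)
{
    ANSELBbits.ANSB0 = 0;        /* make RB0 digital (remove if your device has no ANSELB) */
    TRISBbits.TRISB0 = 0;        /* configure RB0 as an output */
    LATBbits.LATB0   = 0;        /* start with the LED off */

    while (1)
    {
        LATBbits.LATB0 ^= 1;     /* toggle the LED */
        delay(500000);
    }

    return 0;
}
```

Building this with the hammer icon and running it under the debugger, as described in the steps above, should toggle the LED at a rate that depends on your clock settings, provided the pin assignment matches your board.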

    Conclusion

    -

    In this article, we have learned how to download, install and use MPLAB XC32 keygen to activate MPLAB XC32 compiler for PIC32 microcontrollers. We have also learned how to create a new project, configure its settings and options, write, build and debug code using MPLAB XC32 compiler in MPLAB X IDE.

    -

    MPLAB XC32 compiler is a powerful tool for PIC32 development that offers many features and benefits such as optimized code generation, C++ support, MCC integration and Harmony compatibility. However, it is not free and requires a license to use it without any limitations.

    -

    A keygen is a program that generates a valid license file for a software product such as MPLAB XC32 compiler. By using a keygen, you can bypass the need to pay for the license and use the software for free. However, using a keygen is illegal, unethical, risky and unreliable.

    -

    Therefore, we recommend that you purchase a license for MPLAB XC32 compiler from Microchip Technology or its authorized distributors if you want to use it legally, ethically, safely and reliably.

    -

    If you want to learn more about MPLAB XC32 compiler or other Microchip products and tools, please visit their official website at www.microchip.com.

    -

    Frequently Asked Questions

    -

    Here are some common questions and answers about MPLAB XC32 keygen download.

    -
      -
    1. Q: What is the difference between standard and pro license for MPLAB XC32 compiler?
    2. -
    3. A: The standard license offers basic optimization level (-O1) and limited features such as no C++ support or Harmony compatibility. The pro license offers advanced optimization level (-O3) and full features such as C++ support and Harmony compatibility.
    4. -
    5. Q: How long does the license generated by MPLAB XC32 keygen last?
    6. -
    7. A: The license generated by MPLAB XC32 keygen has no expiration date and lasts indefinitely unless it is revoked by Microchip Technology due to license violation or software update.
    8. -
    9. Q: How can I update my MPLAB XC32 compiler if I use a keygen?
    10. -
    11. A: You can update your MPLAB XC32 compiler by downloading and installing the latest version from Microchip Technology's website or by using MPLAB X IDE's Check for Updates feature. However, you might need to use a new keygen or re-generate a new license file if your existing license file becomes invalid or incompatible with the new version.
    12. -
    13. Q: How can I get technical support for MPLAB XC32 compiler if I use a keygen?
    14. -
    15. A: You cannot get technical support for MPLAB XC32 compiler from Microchip Technology or its authorized distributors if you use a keygen because you are violating their terms and conditions of use. You might also face legal consequences if they detect that you are using an illegal license file.
    16. -
    17. Q: How can I uninstall MPLAB XC32 keygen from my computer?
    18. -
    19. A: You can uninstall MPLAB XC32 keygen from your computer by deleting its file and folder from where you downloaded and extracted it. You might also need to delete its registry entries or other traces using a program such as CCleaner or Revo Uninstaller.
    20. -
    -

    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Apowersoft Screen Capture Pro V1.1.3 Incl REPACK Keygen.md b/spaces/1gistliPinn/ChatGPT4/Examples/Apowersoft Screen Capture Pro V1.1.3 Incl REPACK Keygen.md
deleted file mode 100644
index e6d4ab62a12fbeb5a93f6cd131bc3dbd9aa36208..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Apowersoft Screen Capture Pro V1.1.3 Incl REPACK Keygen.md
+++ /dev/null
@@ -1,11 +0,0 @@
-

    Apowersoft Screen Capture Pro v1.1.3 Incl Keygen


    Download File »»» https://imgfil.com/2uxXWs



    -
    Copy the files from /crack to the installation directory. Generate a key to unlock the program.
    Apowersoft Screen Capture Pro 1.3.4 (build 10/16/2017) 2017/Multi+Russian.
    Platform: x64. Interface language: English + Russian. Medicine type: Patch.
    Screenshots of Apowersoft Screen Capture Pro 1.3.4 RePack (& Portable) by TryRooM.
    Video instruction for using Apowersoft Screen Capture Pro 1.3.4.
    Apowersoft Screen Capture Pro 1.3.4 RePack (& Portable) by TryRooM download via torrent for free.
    -
    -
    -

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Garena Free Fire APK for Android - Apktodo.com.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Garena Free Fire APK for Android - Apktodo.com.md
deleted file mode 100644
index 4640e0dd5598a4c43228d83b6e03525d7f7aa39a..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Garena Free Fire APK for Android - Apktodo.com.md
+++ /dev/null
@@ -1,124 +0,0 @@
-

    What is apktodo.com ff and how to download it?

    -

    If you are a fan of battle royale games, you might have heard of Free Fire, one of the most popular and downloaded mobile games in the world. But did you know that you can download it from a website called apktodo.com? In this article, we will explain what apktodo.com is, what Free Fire is, why you should download it from there, and how to do it step by step.

    -

    apktodo.com ff


    Download > https://urlin.us/2uSUWA



    -

    Introduction

    -

    What is apktodo.com?

    -

    Apktodo.com is a website that provides APK files for Android apps and games. APK stands for Android Package Kit, and it is the file format that Android uses to distribute and install apps. APK files can be downloaded from websites like apktodo.com and installed manually on your device, without using Google Play Store. This is also known as sideloading.

    -

    What is Free Fire?

    -

    Free Fire is a battle royale game developed and published by Garena for Android and iOS devices. It is a multiplayer game that places you on a remote island where you have to fight against 49 other players, all seeking survival. You can choose your starting point with your parachute, loot weapons and items from buildings, drive vehicles, hide in the wild, or become invisible by proning under grass or rifts. The last player or team standing wins the game.

    -

    Why download apktodo.com ff?

    -

    There are several reasons why you might want to download Free Fire from apktodo.com instead of Google Play Store. Some of them are:

    - -

    How to download apktodo.com ff?

    -

    Step 1: Enable unknown sources

    -

    Before you can install an APK file from apktodo.com, you need to enable unknown sources on your device. This will allow you to install apps from sources other than Google Play Store. To do this, follow these steps:

    -


    -
      -
    1. Go to Settings > Apps > Special app access > Install unknown apps (or Settings > Security > Unknown sources depending on your Android version).
    2. -
    3. Select the browser or app that you will use to download the APK file from apktodo.com (for example, Chrome).
    4. -
    5. Toggle on Allow from this source or Unknown sources.
    6. -
    -

    Step 2: Visit apktodo.com and search for Free Fire

    -

    Now that you have enabled unknown sources, you can visit apktodo.com and search for Free Fire. To do this, follow these steps:

    -
      -
    1. Open your browser or app and go to [apktodo.com](^9^).
    2. -
    3. Type Free Fire in the search box and tap on the magnifying glass icon.
    4. -
    5. Scroll down and find the Free Fire APK file that matches your device and preferences. You can choose between Free Fire, Free Fire Max, or Free Fire Advance Server. You can also check the file size, version, and rating of each APK file.
    6. -
    7. Tap on the Download button next to the APK file that you want to download.
    8. -
    -

    Step 3: Download and install the APK file

    -

    After you tap on the Download button, you will be redirected to another page where you can see more details about the APK file and a final Download button. To download and install the APK file, follow these steps:

    -
      -
    1. Tap on the final Download button and wait for the download to start.
    2. -
    3. Once the download is complete, tap on the Open button or go to your Downloads folder and find the APK file.
    4. -
    5. Tap on the APK file and follow the instructions on the screen to install it. You might need to grant some permissions or accept some terms and conditions.
    6. -
    -

    Step 4: Launch the game and enjoy

    -

    Congratulations! You have successfully downloaded and installed Free Fire from apktodo.com. Now you can launch the game and enjoy its features and content. To do this, follow these steps:

    -
      -
    1. Go to your app drawer or home screen and find the Free Fire icon.
    2. -
    3. Tap on the icon and wait for the game to load.
    4. -
    5. Login with your account or create a new one if you don't have one.
    6. -
    7. Select your game mode, character, and settings.
    8. -
    9. Start playing and have fun!
    10. -
    -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have explained what apktodo.com is, what Free Fire is, why you should download it from there, and how to do it step by step. We have also provided some screenshots and links to help you with the process. We hope that this article has been helpful and informative for you.

    -

    Call to action

    -

    If you are interested in downloading Free Fire from apktodo.com, don't hesitate to follow our guide and enjoy this amazing battle royale game. You can also share this article with your friends who might be interested in it. And if you have any questions or feedback, feel free to leave a comment below. We would love to hear from you!

    -

    FAQs

    -

    Q1: Is apktodo.com ff safe to use?

    -

    A1: Apktodo.com is a reputable website that provides safe and verified APK files for Android apps and games. However, as with any third-party source, you should always be careful and use a reliable antivirus software before downloading and installing any APK file. You should also check the reviews and ratings of other users who have downloaded the same APK file.

    -

    Q2: What are the benefits of using apktodo.com ff?

    -

    A2: Some of the benefits of using apktodo.com ff are:

    - -

    Q3: How to update apktodo.com ff?

    -

    A3: To update apktodo.com ff, you need to visit apktodo.com again and search for Free Fire. Then, you need to download and install the latest version of the APK file over the existing one. You don't need to uninstall the previous version or lose your data. However, you should always backup your data before updating any app or game.

    -

    Q4: How to fix apktodo.com ff not working?

    -

    A4: If apktodo.com ff is not working properly on your device, you might need to try some of these solutions:

    - -

    Q5: How to contact apktodo.com support?

    -

    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/DBZ RPG Join Goku and Friends in the Ultimate Dragon Ball Adventure.md b/spaces/1phancelerku/anime-remove-background/DBZ RPG Join Goku and Friends in the Ultimate Dragon Ball Adventure.md
deleted file mode 100644
index c93b2ec89d3c4afa18116d309d0a04d5188846ca..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/DBZ RPG Join Goku and Friends in the Ultimate Dragon Ball Adventure.md
+++ /dev/null
@@ -1,131 +0,0 @@
-
    -

    DBZ RPG APK: How to Play Dragon Ball Z Games on Your Android Device

    -

    If you are a fan of Dragon Ball Z, you might have wondered how it would be like to play as your favorite character in an immersive role-playing game. Well, wonder no more, because with DBZ RPG APK, you can do just that. In this article, we will show you what DBZ RPG APK is, why you should play it, how to download and install it, how to play it, and some tips and tricks to make the most out of it.

    -

    Introduction

    -

    Dragon Ball Z is one of the most popular anime series of all time, with millions of fans around the world. The series follows the adventures of Goku and his friends as they protect the Earth from various threats, such as aliens, androids, and demons. Along the way, they also discover the secrets of the dragon balls, mystical orbs that can grant any wish when gathered.

    -

    dbz rpg apk


    DOWNLOAD ✒ ✒ ✒ https://jinyurl.com/2uNLYI



    -

    Dragon Ball Z has inspired many video games over the years, ranging from fighting games to card games. However, one genre that has been lacking is role-playing games. Role-playing games, or RPGs, are games where you create or control a character and interact with a fictional world. RPGs usually have elements such as exploration, quests, combat, leveling up, and customization.

    -

    Fortunately, there is a way to play Dragon Ball Z games in the RPG genre on your Android device. That way is DBZ RPG APK.

    -

    What is DBZ RPG APK?

    -

    DBZ RPG APK is an unofficial fan-made game that lets you play as any Dragon Ball Z character in an open-world RPG. The game is not available on the Google Play Store, so you have to download it from a third-party source as an APK file. An APK file is a package file that contains all the data and code needed to run an Android app.

    -

    The game features many characters from the Dragon Ball Z series, such as Goku, Vegeta, Gohan, Piccolo, Krillin, Trunks, Goten, and more. You can also create your own custom character by choosing their race, gender, appearance, and name. The game has a story mode that follows the main events of the anime, as well as a free mode where you can explore the world at your own pace.

    -

    Why should you play DBZ RPG APK?

    -

    There are many reasons why you should play DBZ RPG APK if you are a fan of Dragon Ball Z. Here are some of them:

    - -

    As you can see, DBZ RPG APK offers a lot of fun and excitement for Dragon Ball Z fans. It is a game that lets you live your dream of being a part of the Dragon Ball Z universe.

    -


    -

    How to download and install DBZ RPG APK

    -

    If you want to play DBZ RPG APK on your Android device, you will need to download and install it manually. Here are the steps to do so:

    -

    Step 1: Find a reliable source for the APK file

    -

    The first thing you need to do is to find a trustworthy website that provides the APK file for DBZ RPG APK. You can search for it on Google or use a link from a reputable source. Be careful not to download from shady or malicious websites that might contain viruses or malware.

    -

    One of the websites that we recommend is [DBZ RPG APK Download]. This website has the latest version of the game and is safe and secure. You can also find more information about the game and its features on this website.

    -

    Step 2: Enable unknown sources on your device

    -

    The next thing you need to do is to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, follow these steps:

    - -

    You can now install apps from sources other than the Google Play Store.

    -

    Step 3: Download and install the APK file

    -

    The final thing you need to do is to download and install the APK file for DBZ RPG APK. To do this, follow these steps:

    - -

    You have successfully installed DBZ RPG APK on your device. You can now launch the game and enjoy playing it.


    How to play DBZ RPG APK

    -

    Now that you have installed DBZ RPG APK on your device, you might be wondering how to play it. Don't worry, we will guide you through the basics of the game and help you get started. Here are the steps to play DBZ RPG APK:

    -

    Choose your favorite Dragon Ball Z character

    -

    When you launch the game, you will be greeted by a menu screen where you can choose between story mode and free mode. Story mode follows the main events of the anime, while free mode lets you explore the world at your own pace. You can also access the settings, credits, and multiplayer mode from this screen.

    -

    Before you start playing, you will need to choose your character. You can either select one of the existing characters from the anime, such as Goku, Vegeta, Gohan, Piccolo, Krillin, Trunks, Goten, and more, or create your own custom character by choosing their race, gender, appearance, and name. You can also edit your character's skills and transformations later in the game.

    -

    Once you have chosen your character, you can start playing the game.

    -

    Explore the open world and complete quests

    -

    The game features a large open world that recreates the locations from the anime. You can fly, run, jump, swim, and teleport across the map. You can also interact with various objects and NPCs. Some NPCs will give you quests that you can complete for rewards, such as experience points, items, equipment, and money. Quests can range from simple tasks like delivering items or defeating enemies to more complex ones like solving puzzles or finding secrets.

    -

    You can also find dragon balls scattered around the world. Dragon balls are mystical orbs that can grant any wish when gathered. There are seven dragon balls in total, and each one has a different color and star number. You can use your scouter to locate them on the map. Once you have collected all seven dragon balls, you can summon Shenron, the eternal dragon, and make a wish.

    -

    Fight against enemies and bosses

    -

    The game also features a combat system that lets you fight against enemies and bosses. You can use various attacks and techniques from the anime, such as punches, kicks, beams, blasts, and more. You can also use items and equipment to boost your stats and abilities. You can switch between different camera angles and lock on to your target for better accuracy.

    -

    You will encounter different types of enemies in the game, such as robots, aliens, androids, demons, and more. Some enemies are stronger than others and require more strategy and skill to defeat. You will also face bosses that are based on the main villains from the anime, such as Frieza, Cell, Buu, Beerus, and more. Bosses are much more powerful and have unique attacks and patterns. You will need to use your full potential and transform into different forms to defeat them.

    -

    Level up and unlock new skills and transformations

    -

    As you play the game, you will gain experience points that will help you level up your character. Leveling up will increase your stats such as health, Continuing the article: power, speed, and defense. You will also unlock new skills and transformations that will make you stronger and more versatile. Skills are special abilities that you can use in combat, such as energy blasts, telekinesis, healing, and more. Transformations are changes in your appearance and power level that give you an edge over your enemies, such as Super Saiyan, Super Saiyan 2, Super Saiyan 3, Super Saiyan 4, Super Saiyan God, and Super Saiyan Blue.

    -

    You can customize your character's skills and transformations by accessing the menu screen. You can assign skills to different buttons and switch between transformations by tapping on the transformation icon. You can also upgrade your skills and transformations by spending skill points that you earn by leveling up.

    -

    Tips and tricks for playing DBZ RPG APK

    -

    To help you enjoy playing DBZ RPG APK even more, we have compiled some tips and tricks that you can use in the game. Here are some of them:

    -

    Use the auto-save feature to avoid losing progress

    -

    The game has an auto-save feature that saves your progress every time you complete a quest, level up, or change locations. This is very useful in case you encounter any bugs or glitches that might cause the game to crash or freeze. You can also manually save your progress by accessing the menu screen and tapping on the save icon. You can load your saved game by tapping on the load icon on the menu screen.

    -

    Collect dragon balls and summon Shenron for wishes

    -

    As mentioned earlier, you can collect dragon balls in the game and use them to summon Shenron, the eternal dragon. Shenron can grant you any wish that you desire, such as increasing your stats, unlocking new skills and transformations, getting rare items and equipment, and more. However, you can only use Shenron once per day, so choose your wish wisely.

    -

    Use items and equipment to boost your stats and abilities

    -

    You can find various items and equipment in the game that can help you in your adventure. Items are consumable items that you can use to restore your health, energy, or status effects. Equipment are wearable items that you can equip to increase your stats and abilities. You can find items and equipment by completing quests, defeating enemies, opening chests, or buying them from shops.

    -

    You can access your inventory by tapping on the bag icon on the menu screen. You can use items by tapping on them and selecting the use option. You can equip equipment by tapping on them and selecting the equip option. You can also sell or discard items and equipment that you don't need by tapping on them and selecting the sell or discard option.

    -

    Join online multiplayer mode to play with other players

    -

    The game also has an online multiplayer mode that lets you play with other players around the world. You can join online multiplayer mode by tapping on the multiplayer icon on the menu screen. You will need an internet connection to play online multiplayer mode.

    -

    In online multiplayer mode, you can choose between cooperative mode or competitive mode. In cooperative mode, you can team up with other players to complete missions, fight enemies, or challenge bosses. In competitive mode, you can fight against other players in PvP battles.

    -

    You can also chat with other players by tapping on the chat icon on the menu screen. You can send text messages or voice messages to other players. You can also trade items and equipment with other players by tapping on the trade icon on the menu screen.

    -

    Conclusion

    -

    DBZ RPG APK is a fan-made game that lets you play as any Dragon Ball Z character in an open-world RPG. The game is not available on the Google Play Store, so you have to download it from a third-party source as an APK file. The game features many characters from the Dragon Ball Z series, a large open world that recreates the locations from the anime, a combat system that lets you use various attacks and techniques from the anime, a customization system that lets you change your appearance, skills, and transformations, and an online multiplayer mode that lets you play with other players. If you are a fan of Dragon Ball Z, you should definitely try DBZ RPG APK. It is a game that will make you feel like you are part of the Dragon Ball Z universe. You can download it from [DBZ RPG APK Download] and follow the steps in this article to install and play it. Have fun and enjoy playing DBZ RPG APK!

    FAQs

    -

    Here are some frequently asked questions about DBZ RPG APK:

    -

    Q: Is DBZ RPG APK safe to download and install?

    -

    A: Yes, DBZ RPG APK is safe to download and install as long as you get it from a reliable source, such as [DBZ RPG APK Download]. However, you should always be careful when downloading and installing apps from unknown sources, as they might contain viruses or malware. You should also scan your device with an antivirus app before and after installing DBZ RPG APK.
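    One extra precaution worth illustrating here (a generic sketch, not a feature of DBZ RPG APK or its download page; the file name and expected hash below are placeholders) is to compare the downloaded APK's SHA-256 hash against a checksum published by the site you got it from:

```python
# Generic download-integrity check, shown only as an illustration.
# "dbz_rpg.apk" and the expected value are placeholders; in practice you
# would use the checksum published by the site you downloaded the file from.
import hashlib


def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


if __name__ == "__main__":
    expected = "0" * 64  # placeholder: paste the published SHA-256 here
    actual = sha256_of_file("dbz_rpg.apk")
    print("OK to install" if actual == expected else "Mismatch: do not install")
```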

    -

    Q: Is DBZ RPG APK legal to play?

    -

    A: DBZ RPG APK is an unofficial fan-made game that is not affiliated with or endorsed by the official Dragon Ball Z franchise or its creators. Therefore, it might violate some intellectual property rights or terms of service of the original owners. However, as long as you play it for personal and non-commercial use, you should not face any legal issues. That said, we are not responsible for any consequences that might arise from playing DBZ RPG APK.

    -

    Q: How can I update DBZ RPG APK to the latest version?

    -

    A: To update DBZ RPG APK to the latest version, you will need to download and install the new APK file from the same source where you got the previous one. You can check for updates by visiting [DBZ RPG APK Download] or by following their social media accounts. You can also enable notifications on your device to get notified when a new update is available.

    -

    Q: How can I contact the developers of DBZ RPG APK?

    -

    A: You can contact the developers of DBZ RPG APK by visiting their website or by sending them an email at [dbzrpgapk@gmail.com]. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, and YouTube. You can give them feedback, suggestions, bug reports, or any other inquiries that you might have.

    -

    Q: How can I support the developers of DBZ RPG APK?

    -

    A: You can support the developers of DBZ RPG APK by donating to them via PayPal or Patreon. You can also support them by sharing their game with your friends and family, by rating and reviewing their game on various platforms, and by following and engaging with them on their social media accounts.

    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download My Eternal Season 5 Episode 1 - The Best Filipino Drama Ever.md b/spaces/1phancelerku/anime-remove-background/Download My Eternal Season 5 Episode 1 - The Best Filipino Drama Ever.md deleted file mode 100644 index a707513267b3d5df603f0b0d9d4eede6f8b8bbd1..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download My Eternal Season 5 Episode 1 - The Best Filipino Drama Ever.md +++ /dev/null @@ -1,120 +0,0 @@ - -

    My Eternal Season 5 Episode 1 Download: How to Watch the Latest Episode of the Hit Philippine Drama

    -

    If you are a fan of Philippine dramas, you have probably heard of My Eternal, one of the most successful and acclaimed series in the country. My Eternal is a romantic drama that follows the star-crossed love story of Daniel and Katerina, who are separated by fate, family, and revenge. The series has been airing since 2012 and has won numerous awards and accolades, both locally and internationally. It has also gained a loyal fan base that eagerly awaits every new episode.

    -




    -

    But how can you watch My Eternal season 5 episode 1, which premiered on June 19, 2023? And what can you expect from this latest installment of the saga? In this article, we will tell you everything you need to know about My Eternal season 5 episode 1 download, including how to do it legally and safely, what to expect from the plot, and where to find more information about the show. Read on to find out more!

    -

    What is My Eternal and why is it popular?

    -

    My Eternal is a Philippine drama series produced by ABS-CBN, the country's largest media network. It is based on the classic novel Wuthering Heights by Emily Brontë, but with a modern twist. The series revolves around Daniel (Coco Martin) and Katerina (Julia Montes), who are childhood friends turned lovers. However, their relationship is complicated by their families' feud, their social status, and their personal vendettas. Daniel is the illegitimate son of Marco (Richard Gomez), a wealthy landowner who abandoned his true love Emily (Dawn Zulueta) for another woman. Katerina is the daughter of Tomas (Joel Torre), a poor worker who hates Marco for his betrayal. Emily returns to seek revenge on Marco and his family, while Daniel becomes her pawn in her scheme. Katerina marries Nathan (Paulo Avelino), Marco's legitimate son, out of obligation, but still loves Daniel. The series follows their struggles, sacrifices, and tragedies as they try to overcome their obstacles and find their eternal happiness.

    -

    My Eternal is popular because it has a captivating story, a talented cast, beautiful cinematography, and a memorable soundtrack. The series has been praised for its realistic portrayal of Filipino culture, values, and history, as well as its exploration of themes such as love, family, loyalty, betrayal, forgiveness, and redemption. The series has also been recognized for its high ratings and awards, both in the Philippines and abroad. It has won several trophies at the PMPC Star Awards for TV, the Golden Screen TV Awards, the Gawad Tanglaw Awards, and the Anak TV Seal Awards, among others.

    How to download My Eternal season 5 episode 1 legally and safely?

    -

    If you want to watch My Eternal season 5 episode 1, you might be tempted to look for illegal or pirated copies online. However, this is not a good idea, as you might end up with low-quality videos, malware, viruses, or scams that could harm your device or compromise your personal information. Moreover, downloading or streaming My Eternal from unauthorized sources is a violation of intellectual property rights and could get you in trouble with the law.

    -

    Fortunately, there are legal and safe ways to download My Eternal season 5 episode 1 and enjoy it at your own convenience. Here are some of the official platforms and websites that offer My Eternal season 5 episode 1 for download:

    - -

    These are some of the advantages and disadvantages of downloading My Eternal season 5 episode 1 from different sources:

    | Source | Advantages | Disadvantages |
    | --- | --- | --- |
    | Youku | Free; high-quality video; Chinese subtitles | Requires account and VPN; geoblocked outside China; no English subtitles |
    | iQiyi | Free; high-quality video; English or local subtitles | Requires account and VPN; geoblocked outside some Asian countries; limited availability |
    | ABS-CBN International Sales | Official distributor; high-quality video; English subtitles | Requires inquiry and payment; not available in some regions; no local subtitles |
    -

    When downloading My Eternal season 5 episode 1, you should also follow these tips and precautions to avoid malware, viruses, and scams:

    -


    - -

    What to expect from My Eternal season 5 episode 1?

    -

    If you are wondering what will happen in My Eternal season 5 episode 1, here is a spoiler-free overview of the events and twists in the latest installment of the drama:

    - -

    These are some of the reactions and reviews of My Eternal season 5 episode 1 from critics and fans:

    -
    -

    "My Eternal season 5 episode 1 was a roller coaster of emotions. I cried, I laughed, I screamed, I swooned. The acting, the writing, the directing, the music, everything was superb. Coco Martin and Julia Montes have such amazing chemistry and they make me feel their love and pain. Maja Salvador is also a great addition to the cast and she plays the villain role very well. I can't wait to see what will happen next in this epic drama." - Maria, a fan from Manila

    -
    -
    -

    "My Eternal season 5 episode 1 delivered on its promise of being the most explosive and exciting season premiere yet. The show continues to impress with its gripping story, stellar performances, stunning visuals, and captivating soundtrack. My Eternal is not just a drama, it's a phenomenon that transcends borders and cultures. It is one of the best Philippine dramas ever made and deserves all the praise and recognition it gets." - John, a critic from Singapore

    -
    -

    If you want to see what will happen next in My Eternal, you can watch the teasers and trailers of My Eternal season 5 episode 2 on the official YouTube channel of ABS-CBN Entertainment. You can also follow the official social media accounts of My Eternal on Facebook, Twitter, and Instagram for more updates, behind-the-scenes, and exclusive content.

    -

    Conclusion

    -

    In conclusion, My Eternal season 5 episode 1 is a must-watch for fans of Philippine dramas and lovers of romance. It is the latest episode of the hit series My Eternal, which tells the story of Daniel and Katerina, two star-crossed lovers who face many challenges and obstacles in their quest for eternal happiness. You can download My Eternal season 5 episode 1 legally and safely from various platforms and websites, such as Youku, iQiyi, or ABS-CBN International Sales. You can also expect a lot of drama, suspense, action, and romance from My Eternal season 5 episode 1, as well as from the upcoming episodes of the series.

    -

    If you enjoyed this article, please share it with your friends and family who are also fans of My Eternal. You can also leave your comments and feedback below. We would love to hear from you!

    -

    FAQs

    -

    Here are some frequently asked questions and answers about My Eternal season 5 episode 1:

    -
      -
    1. When will My Eternal season 5 episode 1 be available for download? It premiered on June 19, 2023 on ABS-CBN in the Philippines, and it will be available for download on different platforms and websites within a few days or weeks after its broadcast date.
    2. How much does it cost to download My Eternal season 5 episode 1? The cost depends on the source and the quality of the video. Some sources offer the episode for free, while others require payment or subscription fees. You should compare the prices and the features of each source before downloading.
    3. Is it legal to download My Eternal season 5 episode 1? It is legal to download the episode from the official platforms and websites that have the rights and licenses to distribute the show. However, it is illegal to download or stream My Eternal from unauthorized or pirated sources that infringe on the intellectual property rights of the show's creators and producers.
    4. Is it safe to download My Eternal season 5 episode 1? It is safe to download the episode from the official platforms and websites that have the security and quality standards to protect your device and data. However, it is unsafe to download or stream My Eternal from unknown or suspicious sources that might contain malware, viruses, or scams that could harm your device or compromise your personal information.
    5. Where can I find more information about My Eternal? You can find more information on the official website of ABS-CBN Entertainment, where you can also watch the episodes online. You can also follow the official social media accounts of My Eternal on Facebook, Twitter, and Instagram for updates, behind-the-scenes material, and exclusive content, or join the online communities and forums of My Eternal fans to discuss and share your opinions about the show.

    -
    -
    \ No newline at end of file diff --git a/spaces/4Taps/SadTalker/src/utils/face_enhancer.py b/spaces/4Taps/SadTalker/src/utils/face_enhancer.py deleted file mode 100644 index 6192649d7141f2cd05f1302f7c954bfb8fa612fa..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/utils/face_enhancer.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -from basicsr.utils import imwrite - -from gfpgan import GFPGANer - -from tqdm import tqdm - -def enhancer(images, method='gfpgan'): - - # ------------------------ set up GFPGAN restorer ------------------------ - if method == 'gfpgan': - arch = 'clean' - channel_multiplier = 2 - model_name = 'GFPGANv1.4' - url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth' - elif method == 'RestoreFormer': - arch = 'RestoreFormer' - channel_multiplier = 2 - model_name = 'RestoreFormer' - url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth' - elif method == 'codeformer': - arch = 'CodeFormer' - channel_multiplier = 2 - model_name = 'CodeFormer' - url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth' - else: - raise ValueError(f'Wrong model version {method}.') - - # determine model paths - model_path = os.path.join('experiments/pretrained_models', model_name + '.pth') - - if not os.path.isfile(model_path): - model_path = os.path.join('checkpoints', model_name + '.pth') - - if not os.path.isfile(model_path): - # download pre-trained models from url - model_path = url - - restorer = GFPGANer( - model_path=model_path, - upscale=2, - arch=arch, - channel_multiplier=channel_multiplier, - bg_upsampler=None) - - # ------------------------ restore ------------------------ - restored_img = [] - for idx in tqdm(range(len(images)), 'Face Enhancer:'): - - # restore faces and background if necessary - cropped_faces, restored_faces, _ = restorer.enhance( - images[idx], - has_aligned=True, - only_center_face=False, - paste_back=True, - weight=0.5) - - restored_img += restored_faces - - return restored_img \ No newline at end of file diff --git a/spaces/801artistry/RVC801/infer/lib/infer_pack/modules.py b/spaces/801artistry/RVC801/infer/lib/infer_pack/modules.py deleted file mode 100644 index 2201a58bee9b7808d386b3ef9ac2d1f9630e56ef..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/infer/lib/infer_pack/modules.py +++ /dev/null @@ -1,521 +0,0 @@ -import copy -import math - -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d -from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, weight_norm - -from infer.lib.infer_pack import commons -from infer.lib.infer_pack.commons import get_padding, init_weights -from infer.lib.infer_pack.transforms import piecewise_rational_quadratic_transform - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - 
self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = 
torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def 
remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/801artistry/RVC801/tools/calc_rvc_model_similarity.py b/spaces/801artistry/RVC801/tools/calc_rvc_model_similarity.py deleted file mode 100644 index 42496e088e51dc5162d0714470c2226f696e260c..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/tools/calc_rvc_model_similarity.py +++ /dev/null @@ -1,96 +0,0 @@ -# This code references https://huggingface.co/JosephusCheung/ASimilarityCalculatior/blob/main/qwerty.py -# Fill in the path of the model to be queried and the root directory of the reference models, and this script will return the similarity between the model to be queried and all reference models. -import os -import logging - -logger = logging.getLogger(__name__) - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -def cal_cross_attn(to_q, to_k, to_v, rand_input): - hidden_dim, embed_dim = to_q.shape - attn_to_q = nn.Linear(hidden_dim, embed_dim, bias=False) - attn_to_k = nn.Linear(hidden_dim, embed_dim, bias=False) - attn_to_v = nn.Linear(hidden_dim, embed_dim, bias=False) - attn_to_q.load_state_dict({"weight": to_q}) - attn_to_k.load_state_dict({"weight": to_k}) - attn_to_v.load_state_dict({"weight": to_v}) - - return torch.einsum( - "ik, jk -> ik", - F.softmax( - torch.einsum("ij, kj -> ik", attn_to_q(rand_input), attn_to_k(rand_input)), - dim=-1, - ), - attn_to_v(rand_input), - ) - - -def model_hash(filename): - try: - with open(filename, "rb") as file: - import hashlib - - m = hashlib.sha256() - - file.seek(0x100000) - m.update(file.read(0x10000)) - return m.hexdigest()[0:8] - except FileNotFoundError: - return "NOFILE" - - -def eval(model, n, input): - qk = f"enc_p.encoder.attn_layers.{n}.conv_q.weight" - uk = f"enc_p.encoder.attn_layers.{n}.conv_k.weight" - vk = f"enc_p.encoder.attn_layers.{n}.conv_v.weight" - atoq, atok, atov = model[qk][:, :, 0], model[uk][:, :, 0], model[vk][:, :, 0] - - attn = cal_cross_attn(atoq, atok, atov, input) - return attn - - -def main(path, root): - torch.manual_seed(114514) - model_a = torch.load(path, map_location="cpu")["weight"] - - logger.info("Query:\t\t%s\t%s" % (path, model_hash(path))) - - map_attn_a = {} - map_rand_input = {} - for n in range(6): - hidden_dim, embed_dim, _ = model_a[ - f"enc_p.encoder.attn_layers.{n}.conv_v.weight" - ].shape - rand_input = torch.randn([embed_dim, hidden_dim]) - - map_attn_a[n] = eval(model_a, n, rand_input) - map_rand_input[n] = rand_input - - del model_a - - for name in sorted(list(os.listdir(root))): - path = "%s/%s" % (root, name) - model_b = torch.load(path, map_location="cpu")["weight"] - - sims = [] - for n in range(6): - attn_a = map_attn_a[n] - attn_b = eval(model_b, n, map_rand_input[n]) - - sim = torch.mean(torch.cosine_similarity(attn_a, attn_b)) - sims.append(sim) - - logger.info( - "Reference:\t%s\t%s\t%s" - % (path, model_hash(path), f"{torch.mean(torch.stack(sims)) * 1e2:.2f}%") - ) - - -if __name__ == "__main__": - query_path = 
r"assets\weights\mi v3.pth" - reference_root = r"assets\weights" - main(query_path, reference_root) diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/trackball.py b/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/trackball.py deleted file mode 100644 index 3e57a0e82d3f07b80754f575c28a0e05cb73fc50..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/trackball.py +++ /dev/null @@ -1,216 +0,0 @@ -"""Trackball class for 3D manipulation of viewpoints. -""" -import numpy as np - -import trimesh.transformations as transformations - - -class Trackball(object): - """A trackball class for creating camera transforms from mouse movements. - """ - STATE_ROTATE = 0 - STATE_PAN = 1 - STATE_ROLL = 2 - STATE_ZOOM = 3 - - def __init__(self, pose, size, scale, - target=np.array([0.0, 0.0, 0.0])): - """Initialize a trackball with an initial camera-to-world pose - and the given parameters. - - Parameters - ---------- - pose : [4,4] - An initial camera-to-world pose for the trackball. - - size : (float, float) - The width and height of the camera image in pixels. - - scale : float - The diagonal of the scene's bounding box -- - used for ensuring translation motions are sufficiently - fast for differently-sized scenes. - - target : (3,) float - The center of the scene in world coordinates. - The trackball will revolve around this point. - """ - self._size = np.array(size) - self._scale = float(scale) - - self._pose = pose - self._n_pose = pose - - self._target = target - self._n_target = target - - self._state = Trackball.STATE_ROTATE - - @property - def pose(self): - """autolab_core.RigidTransform : The current camera-to-world pose. - """ - return self._n_pose - - def set_state(self, state): - """Set the state of the trackball in order to change the effect of - dragging motions. - - Parameters - ---------- - state : int - One of Trackball.STATE_ROTATE, Trackball.STATE_PAN, - Trackball.STATE_ROLL, and Trackball.STATE_ZOOM. - """ - self._state = state - - def resize(self, size): - """Resize the window. - - Parameters - ---------- - size : (float, float) - The new width and height of the camera image in pixels. - """ - self._size = np.array(size) - - def down(self, point): - """Record an initial mouse press at a given point. - - Parameters - ---------- - point : (2,) int - The x and y pixel coordinates of the mouse press. - """ - self._pdown = np.array(point, dtype=np.float32) - self._pose = self._n_pose - self._target = self._n_target - - def drag(self, point): - """Update the tracball during a drag. - - Parameters - ---------- - point : (2,) int - The current x and y pixel coordinates of the mouse during a drag. - This will compute a movement for the trackball with the relative - motion between this point and the one marked by down(). 
- """ - point = np.array(point, dtype=np.float32) - dx, dy = point - self._pdown - mindim = 0.3 * np.min(self._size) - - target = self._target - x_axis = self._pose[:3,0].flatten() - y_axis = self._pose[:3,1].flatten() - z_axis = self._pose[:3,2].flatten() - eye = self._pose[:3,3].flatten() - - # Interpret drag as a rotation - if self._state == Trackball.STATE_ROTATE: - x_angle = -dx / mindim - x_rot_mat = transformations.rotation_matrix( - x_angle, y_axis, target - ) - - y_angle = dy / mindim - y_rot_mat = transformations.rotation_matrix( - y_angle, x_axis, target - ) - - self._n_pose = y_rot_mat.dot(x_rot_mat.dot(self._pose)) - - # Interpret drag as a roll about the camera axis - elif self._state == Trackball.STATE_ROLL: - center = self._size / 2.0 - v_init = self._pdown - center - v_curr = point - center - v_init = v_init / np.linalg.norm(v_init) - v_curr = v_curr / np.linalg.norm(v_curr) - - theta = (-np.arctan2(v_curr[1], v_curr[0]) + - np.arctan2(v_init[1], v_init[0])) - - rot_mat = transformations.rotation_matrix(theta, z_axis, target) - - self._n_pose = rot_mat.dot(self._pose) - - # Interpret drag as a camera pan in view plane - elif self._state == Trackball.STATE_PAN: - dx = -dx / (5.0 * mindim) * self._scale - dy = -dy / (5.0 * mindim) * self._scale - - translation = dx * x_axis + dy * y_axis - self._n_target = self._target + translation - t_tf = np.eye(4) - t_tf[:3,3] = translation - self._n_pose = t_tf.dot(self._pose) - - # Interpret drag as a zoom motion - elif self._state == Trackball.STATE_ZOOM: - radius = np.linalg.norm(eye - target) - ratio = 0.0 - if dy > 0: - ratio = np.exp(abs(dy) / (0.5 * self._size[1])) - 1.0 - elif dy < 0: - ratio = 1.0 - np.exp(dy / (0.5 * (self._size[1]))) - translation = -np.sign(dy) * ratio * radius * z_axis - t_tf = np.eye(4) - t_tf[:3,3] = translation - self._n_pose = t_tf.dot(self._pose) - - def scroll(self, clicks): - """Zoom using a mouse scroll wheel motion. - - Parameters - ---------- - clicks : int - The number of clicks. Positive numbers indicate forward wheel - movement. - """ - target = self._target - ratio = 0.90 - - mult = 1.0 - if clicks > 0: - mult = ratio**clicks - elif clicks < 0: - mult = (1.0 / ratio)**abs(clicks) - - z_axis = self._n_pose[:3,2].flatten() - eye = self._n_pose[:3,3].flatten() - radius = np.linalg.norm(eye - target) - translation = (mult * radius - radius) * z_axis - t_tf = np.eye(4) - t_tf[:3,3] = translation - self._n_pose = t_tf.dot(self._n_pose) - - z_axis = self._pose[:3,2].flatten() - eye = self._pose[:3,3].flatten() - radius = np.linalg.norm(eye - target) - translation = (mult * radius - radius) * z_axis - t_tf = np.eye(4) - t_tf[:3,3] = translation - self._pose = t_tf.dot(self._pose) - - def rotate(self, azimuth, axis=None): - """Rotate the trackball about the "Up" axis by azimuth radians. - - Parameters - ---------- - azimuth : float - The number of radians to rotate. 
- """ - target = self._target - - y_axis = self._n_pose[:3,1].flatten() - if axis is not None: - y_axis = axis - x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target) - self._n_pose = x_rot_mat.dot(self._n_pose) - - y_axis = self._pose[:3,1].flatten() - if axis is not None: - y_axis = axis - x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target) - self._pose = x_rot_mat.dot(self._pose) diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/syntaspeech/multi_window_disc.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/syntaspeech/multi_window_disc.py deleted file mode 100644 index a8166ac5b514e501043b9fed13aab01421a6c10e..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/syntaspeech/multi_window_disc.py +++ /dev/null @@ -1,136 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn - - -class SingleWindowDisc(nn.Module): - def __init__(self, time_length, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128): - super().__init__() - padding = (kernel[0] // 2, kernel[1] // 2) - self.model = nn.ModuleList([ - nn.Sequential(*[ - nn.Conv2d(c_in, hidden_size, kernel, (2, 2), padding), - nn.LeakyReLU(0.2, inplace=True), - nn.Dropout2d(0.25), - nn.BatchNorm2d(hidden_size, 0.8) - ]), - nn.Sequential(*[ - nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding), - nn.LeakyReLU(0.2, inplace=True), - nn.Dropout2d(0.25), - nn.BatchNorm2d(hidden_size, 0.8) - ]), - nn.Sequential(*[ - nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding), - nn.LeakyReLU(0.2, inplace=True), - nn.Dropout2d(0.25), - ]), - ]) - ds_size = (time_length // 2 ** 3, (freq_length + 7) // 2 ** 3) - self.adv_layer = nn.Linear(hidden_size * ds_size[0] * ds_size[1], 1) - - def forward(self, x): - """ - :param x: [B, C, T, n_bins] - :return: validity: [B, 1], h: List of hiddens - """ - h = [] - for l in self.model: - x = l(x) - h.append(x) - x = x.view(x.shape[0], -1) - validity = self.adv_layer(x) # [B, 1] - return validity, h - - -class MultiWindowDiscriminator(nn.Module): - def __init__(self, time_lengths, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128): - super(MultiWindowDiscriminator, self).__init__() - self.win_lengths = time_lengths - self.discriminators = nn.ModuleList() - - for time_length in time_lengths: - self.discriminators += [SingleWindowDisc(time_length, freq_length, kernel, c_in=c_in, hidden_size=hidden_size)] - - def forward(self, x, x_len, start_frames_wins=None): - ''' - Args: - x (tensor): input mel, (B, c_in, T, n_bins). - x_length (tensor): len of per mel. (B,). - - Returns: - tensor : (B). - ''' - validity = [] - if start_frames_wins is None: - start_frames_wins = [None] * len(self.discriminators) - h = [] - for i, start_frames in zip(range(len(self.discriminators)), start_frames_wins): - x_clip, start_frames = self.clip(x, x_len, self.win_lengths[i], start_frames) # (B, win_length, C) - start_frames_wins[i] = start_frames - if x_clip is None: - continue - x_clip, h_ = self.discriminators[i](x_clip) - h += h_ - validity.append(x_clip) - if len(validity) != len(self.discriminators): - return None, start_frames_wins, h - validity = sum(validity) # [B] - return validity, start_frames_wins, h - - def clip(self, x, x_len, win_length, start_frames=None): - '''Ramdom clip x to win_length. - Args: - x (tensor) : (B, c_in, T, n_bins). - cond (tensor) : (B, T, H). - x_len (tensor) : (B,). - win_length (int): target clip length - - Returns: - (tensor) : (B, c_in, win_length, n_bins). 
- - ''' - T_start = 0 - T_end = x_len.max() - win_length - if T_end < 0: - return None, None, start_frames - T_end = T_end.item() - if start_frames is None: - start_frame = np.random.randint(low=T_start, high=T_end + 1) - start_frames = [start_frame] * x.size(0) - else: - start_frame = start_frames[0] - x_batch = x[:, :, start_frame: start_frame + win_length] - return x_batch, start_frames - - -class Discriminator(nn.Module): - def __init__(self, time_lengths=[32, 64, 128], freq_length=80, kernel=(3, 3), c_in=1, - hidden_size=128): - super(Discriminator, self).__init__() - self.time_lengths = time_lengths - self.discriminator = MultiWindowDiscriminator( - freq_length=freq_length, - time_lengths=time_lengths, - kernel=kernel, - c_in=c_in, hidden_size=hidden_size - ) - - - def forward(self, x, start_frames_wins=None): - """ - - :param x: [B, T, 80] - :param return_y_only: - :return: - """ - if len(x.shape) == 3: - x = x[:, None, :, :] # [B,1,T,80] - x_len = x.sum([1, -1]).ne(0).int().sum([-1]) - ret = {'y_c': None, 'y': None} - ret['y'], start_frames_wins, ret['h'] = self.discriminator( - x, x_len, start_frames_wins=start_frames_wins) - - ret['start_frames_wins'] = start_frames_wins - return ret - diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/hifigan/mel_utils.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/hifigan/mel_utils.py deleted file mode 100644 index a75fce72db54812320bc60aedfdd378ccecb3374..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/hifigan/mel_utils.py +++ /dev/null @@ -1,80 +0,0 @@ -import numpy as np -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read - -MAX_WAV_VALUE = 32768.0 - - -def load_wav(full_path): - sampling_rate, data = read(full_path) - return data, sampling_rate - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def mel_spectrogram(y, hparams, center=False, complex=False): - # hop_size: 512 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) - # win_size: 2048 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate) - # fmin: 55 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) - # fmax: 10000 # To be increased/reduced depending on data. - # fft_size: 2048 # Extra window size is filled with 0 paddings to match this parameter - # n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, - n_fft = hparams['fft_size'] - num_mels = hparams['audio_num_mel_bins'] - sampling_rate = hparams['audio_sample_rate'] - hop_size = hparams['hop_size'] - win_size = hparams['win_size'] - fmin = hparams['fmin'] - fmax = hparams['fmax'] - y = y.clamp(min=-1., max=1.) 
- global mel_basis, hann_window - if fmax not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[str(fmax) + '_' + str(y.device)] = torch.from_numpy(mel).float().to(y.device) - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), [int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)], - mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - if not complex: - spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) - spec = torch.matmul(mel_basis[str(fmax) + '_' + str(y.device)], spec) - spec = spectral_normalize_torch(spec) - else: - B, C, T, _ = spec.shape - spec = spec.transpose(1, 2) # [B, T, n_fft, 2] - return spec diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/multiprocess_utils.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/multiprocess_utils.py deleted file mode 100644 index e2773543c702d2819559dfde4c5febab03899790..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/multiprocess_utils.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -import traceback -from functools import partial -from tqdm import tqdm - - -def chunked_worker(worker_id, args_queue=None, results_queue=None, init_ctx_func=None): - ctx = init_ctx_func(worker_id) if init_ctx_func is not None else None - while True: - args = args_queue.get() - if args == '': - return - job_idx, map_func, arg = args - try: - map_func_ = partial(map_func, ctx=ctx) if ctx is not None else map_func - if isinstance(arg, dict): - res = map_func_(**arg) - elif isinstance(arg, (list, tuple)): - res = map_func_(*arg) - else: - res = map_func_(arg) - results_queue.put((job_idx, res)) - except: - traceback.print_exc() - results_queue.put((job_idx, None)) - - -class MultiprocessManager: - def __init__(self, num_workers=None, init_ctx_func=None, multithread=False, queue_max=-1): - if multithread: - from multiprocessing.dummy import Queue, Process - else: - from multiprocessing import Queue, Process - if num_workers is None: - num_workers = int(os.getenv('N_PROC', os.cpu_count())) - self.num_workers = num_workers - self.results_queue = Queue(maxsize=-1) - self.jobs_pending = [] - self.args_queue = Queue(maxsize=queue_max) - self.workers = [] - self.total_jobs = 0 - self.multithread = multithread - for i in range(num_workers): - if multithread: - p = Process(target=chunked_worker, - args=(i, self.args_queue, self.results_queue, init_ctx_func)) - else: - p = Process(target=chunked_worker, - args=(i, self.args_queue, self.results_queue, init_ctx_func), - daemon=True) - self.workers.append(p) - p.start() - - def add_job(self, func, args): - if not self.args_queue.full(): - self.args_queue.put((self.total_jobs, func, args)) - else: - self.jobs_pending.append((self.total_jobs, func, args)) - self.total_jobs += 1 - - def get_results(self): - self.n_finished = 0 - while self.n_finished < self.total_jobs: - while len(self.jobs_pending) > 0 and not self.args_queue.full(): - self.args_queue.put(self.jobs_pending[0]) - self.jobs_pending = self.jobs_pending[1:] - job_id, res = self.results_queue.get() - yield job_id, res - self.n_finished += 1 - for w in range(self.num_workers): - self.args_queue.put("") - for w in self.workers: - w.join() - - def close(self): - if not self.multithread: - for w in self.workers: - 
w.terminate() - - def __len__(self): - return self.total_jobs - - -def multiprocess_run_tqdm(map_func, args, num_workers=None, ordered=True, init_ctx_func=None, - multithread=False, queue_max=-1, desc=None): - for i, res in tqdm( - multiprocess_run(map_func, args, num_workers, ordered, init_ctx_func, multithread, - queue_max=queue_max), - total=len(args), desc=desc): - yield i, res - - -def multiprocess_run(map_func, args, num_workers=None, ordered=True, init_ctx_func=None, multithread=False, - queue_max=-1): - """ - Multiprocessing running chunked jobs. - - Examples: - >>> for res in tqdm(multiprocess_run(job_func, args): - >>> print(res) - - :param map_func: - :param args: - :param num_workers: - :param ordered: - :param init_ctx_func: - :param q_max_size: - :param multithread: - :return: - """ - if num_workers is None: - num_workers = int(os.getenv('N_PROC', os.cpu_count())) - # num_workers = 1 - manager = MultiprocessManager(num_workers, init_ctx_func, multithread, queue_max=queue_max) - for arg in args: - manager.add_job(map_func, arg) - if ordered: - n_jobs = len(args) - results = ['' for _ in range(n_jobs)] - i_now = 0 - for job_i, res in manager.get_results(): - results[job_i] = res - while i_now < n_jobs and (not isinstance(results[i_now], str) or results[i_now] != ''): - yield i_now, results[i_now] - results[i_now] = None - i_now += 1 - else: - for job_i, res in manager.get_results(): - yield job_i, res - manager.close() diff --git a/spaces/ARTeLab/DTM_Estimation_SRandD/README.md b/spaces/ARTeLab/DTM_Estimation_SRandD/README.md deleted file mode 100644 index 8b4ebcce629dbbca425794daa1799950741bc2f0..0000000000000000000000000000000000000000 --- a/spaces/ARTeLab/DTM_Estimation_SRandD/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: DTM Estimation SRandD -emoji: 👁 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.0.20 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ASJMO/freegpt/client/css/label.css b/spaces/ASJMO/freegpt/client/css/label.css deleted file mode 100644 index d84873d41e41f2cc22f9d3ace67c30ec07706811..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/client/css/label.css +++ /dev/null @@ -1,16 +0,0 @@ -label { - cursor: pointer; - text-indent: -9999px; - width: 50px; - height: 30px; - backdrop-filter: blur(20px); - -webkit-backdrop-filter: blur(20px); - background-color: var(--blur-bg); - border-radius: var(--border-radius-1); - border: 1px solid var(--blur-border); - display: block; - border-radius: 100px; - position: relative; - overflow: hidden; - transition: 0.33s; -} diff --git a/spaces/ASJMO/freegpt/client/js/highlight.min.js b/spaces/ASJMO/freegpt/client/js/highlight.min.js deleted file mode 100644 index d410b45b38119606525a0a7c0c60c428c5ee6eb7..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/client/js/highlight.min.js +++ /dev/null @@ -1 +0,0 @@ -var hljs=function(){"use strict";var e={exports:{}};function n(e){return e instanceof Map?e.clear=e.delete=e.set=()=>{throw Error("map is read-only")}:e instanceof Set&&(e.add=e.clear=e.delete=()=>{throw Error("set is read-only")}),Object.freeze(e),Object.getOwnPropertyNames(e).forEach(t=>{var a=e[t];"object"!=typeof a||Object.isFrozen(a)||n(a)}),e}e.exports=n,e.exports.default=n;class t{constructor(e){void 0===e.data&&(e.data={}),this.data=e.data,this.isMatchIgnored=!1}ignoreMatch(){this.isMatchIgnored=!0}}function a(e){return 
e.replace(/&/g,"&").replace(//g,">").replace(/"/g,""").replace(/'/g,"'")}function i(e,...n){let t=Object.create(null);for(let a in e)t[a]=e[a];return n.forEach(e=>{for(let n in e)t[n]=e[n]}),t}let r=e=>!!e.scope||e.sublanguage&&e.language;class s{constructor(e,n){this.buffer="",this.classPrefix=n.classPrefix,e.walk(this)}addText(e){this.buffer+=a(e)}openNode(e){if(!r(e))return;let n="";n=e.sublanguage?"language-"+e.language:((e,{prefix:n})=>{if(e.includes(".")){let t=e.split(".");return[`${n}${t.shift()}`,...t.map((e,n)=>`${e}${"_".repeat(n+1)}`),].join(" ")}return`${n}${e}`})(e.scope,{prefix:this.classPrefix}),this.span(n)}closeNode(e){r(e)&&(this.buffer+="")}value(){return this.buffer}span(e){this.buffer+=``}}let l=(e={})=>{let n={children:[]};return Object.assign(n,e),n};class o{constructor(){this.rootNode=l(),this.stack=[this.rootNode]}get top(){return this.stack[this.stack.length-1]}get root(){return this.rootNode}add(e){this.top.children.push(e)}openNode(e){let n=l({scope:e});this.add(n),this.stack.push(n)}closeNode(){if(this.stack.length>1)return this.stack.pop()}closeAllNodes(){for(;this.closeNode(););}toJSON(){return JSON.stringify(this.rootNode,null,4)}walk(e){return this.constructor._walk(e,this.rootNode)}static _walk(e,n){return"string"==typeof n?e.addText(n):n.children&&(e.openNode(n),n.children.forEach(n=>this._walk(e,n)),e.closeNode(n)),e}static _collapse(e){"string"!=typeof e&&e.children&&(e.children.every(e=>"string"==typeof e)?e.children=[e.children.join("")]:e.children.forEach(e=>{o._collapse(e)}))}}class c extends o{constructor(e){super(),this.options=e}addKeyword(e,n){""!==e&&(this.openNode(n),this.addText(e),this.closeNode())}addText(e){""!==e&&this.add(e)}addSublanguage(e,n){let t=e.root;t.sublanguage=!0,t.language=n,this.add(t)}toHTML(){return new s(this,this.options).value()}finalize(){return!0}}function d(e){return e?"string"==typeof e?e:e.source:null}function g(e){return m("(?=",e,")")}function u(e){return m("(?:",e,")*")}function b(e){return m("(?:",e,")?")}function m(...e){return e.map(e=>d(e)).join("")}function p(...e){let n=(e=>{let n=e[e.length-1];return"object"==typeof n&&n.constructor===Object?(e.splice(e.length-1,1),n):{}})(e);return"("+(n.capture?"":"?:")+e.map(e=>d(e)).join("|")+")"}function h(e){return RegExp(e.toString()+"|").exec("").length-1}let f=/\[(?:[^\\\]]|\\.)*\]|\(\??|\\([1-9][0-9]*)|\\./;function E(e,{joinWith:n}){let t=0;return e.map(e=>{t+=1;let n=t,a=d(e),i="";for(;a.length>0;){let r=f.exec(a);if(!r){i+=a;break}i+=a.substring(0,r.index),a=a.substring(r.index+r[0].length),"\\"===r[0][0]&&r[1]?i+="\\"+(Number(r[1])+n):(i+=r[0],"("===r[0]&&t++)}return i}).map(e=>`(${e})`).join(n)}let $="[a-zA-Z]\\w*",y="[a-zA-Z_]\\w*",N="\\b\\d+(\\.\\d+)?",w="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",v="\\b(0b[01]+)",x={begin:"\\\\[\\s\\S]",relevance:0},k=(e,n,t={})=>{let a=i({scope:"comment",begin:e,end:n,contains:[]},t);a.contains.push({scope:"doctag",begin:"[ ]*(?=(TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):)",end:/(TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):/,excludeBegin:!0,relevance:0});let r=p("I","a","is","so","us","to","at","if","in","it","on",/[A-Za-z]+['](d|ve|re|ll|t|s|n)/,/[A-Za-z]+[-][a-z]+/,/[A-Za-z][a-z]{2,}/);return a.contains.push({begin:m(/[ ]+/,"(",r,/[.]?[:]?([.][ ]|[ ])/,"){3}")}),a},M=k("//","$"),O=k("/\\*","\\*/"),S=k("#","$");var 
A=Object.freeze({__proto__:null,MATCH_NOTHING_RE:/\b\B/,IDENT_RE:$,UNDERSCORE_IDENT_RE:y,NUMBER_RE:N,C_NUMBER_RE:w,BINARY_NUMBER_RE:v,RE_STARTERS_RE:"!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",SHEBANG(e={}){let n=/^#![ ]*\//;return e.binary&&(e.begin=m(n,/.*\b/,e.binary,/\b.*/)),i({scope:"meta",begin:n,end:/$/,relevance:0,"on:begin"(e,n){0!==e.index&&n.ignoreMatch()}},e)},BACKSLASH_ESCAPE:x,APOS_STRING_MODE:{scope:"string",begin:"'",end:"'",illegal:"\\n",contains:[x]},QUOTE_STRING_MODE:{scope:"string",begin:'"',end:'"',illegal:"\\n",contains:[x]},PHRASAL_WORDS_MODE:{begin:/\b(a|an|the|are|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|they|like|more)\b/},COMMENT:k,C_LINE_COMMENT_MODE:M,C_BLOCK_COMMENT_MODE:O,HASH_COMMENT_MODE:S,NUMBER_MODE:{scope:"number",begin:N,relevance:0},C_NUMBER_MODE:{scope:"number",begin:w,relevance:0},BINARY_NUMBER_MODE:{scope:"number",begin:v,relevance:0},REGEXP_MODE:{begin:/(?=\/[^/\n]*\/)/,contains:[{scope:"regexp",begin:/\//,end:/\/[gimuy]*/,illegal:/\n/,contains:[x,{begin:/\[/,end:/\]/,relevance:0,contains:[x]},]},]},TITLE_MODE:{scope:"title",begin:$,relevance:0},UNDERSCORE_TITLE_MODE:{scope:"title",begin:y,relevance:0},METHOD_GUARD:{begin:"\\.\\s*[a-zA-Z_]\\w*",relevance:0},END_SAME_AS_BEGIN:e=>Object.assign(e,{"on:begin"(e,n){n.data._beginMatch=e[1]},"on:end"(e,n){n.data._beginMatch!==e[1]&&n.ignoreMatch()}})});function C(e,n){"."===e.input[e.index-1]&&n.ignoreMatch()}function T(e,n){void 0!==e.className&&(e.scope=e.className,delete e.className)}function R(e,n){n&&e.beginKeywords&&(e.begin="\\b("+e.beginKeywords.split(" ").join("|")+")(?!\\.)(?=\\b|\\s)",e.__beforeBegin=C,e.keywords=e.keywords||e.beginKeywords,delete e.beginKeywords,void 0===e.relevance&&(e.relevance=0))}function D(e,n){Array.isArray(e.illegal)&&(e.illegal=p(...e.illegal))}function I(e,n){if(e.match){if(e.begin||e.end)throw Error("begin & end are not supported with match");e.begin=e.match,delete e.match}}function L(e,n){void 0===e.relevance&&(e.relevance=1)}let B=(e,n)=>{if(!e.beforeMatch)return;if(e.starts)throw Error("beforeMatch cannot be used with starts");let t=Object.assign({},e);Object.keys(e).forEach(n=>{delete e[n]}),e.keywords=t.keywords,e.begin=m(t.beforeMatch,g(t.begin)),e.starts={relevance:0,contains:[Object.assign(t,{endsParent:!0})]},e.relevance=0,delete t.beforeMatch},_=["of","and","for","in","not","or","if","then","parent","list","value",],z={},F=e=>{console.error(e)},U=(e,...n)=>{},P=(e,n)=>{z[`${e}/${n}`]||(console.log(`Deprecated as of ${e}. 
${n}`),z[`${e}/${n}`]=!0)},j=Error();function K(e,n,{key:t}){let a=0,i=e[t],r={},s={};for(let l=1;l<=n.length;l++)s[l+a]=i[l],r[l+a]=!0,a+=h(n[l-1]);e[t]=s,e[t]._emit=r,e[t]._multi=!0}function q(e){var n;(n=e).scope&&"object"==typeof n.scope&&null!==n.scope&&(n.beginScope=n.scope,delete n.scope),"string"==typeof e.beginScope&&(e.beginScope={_wrap:e.beginScope}),"string"==typeof e.endScope&&(e.endScope={_wrap:e.endScope}),(e=>{if(Array.isArray(e.begin)){if(e.skip||e.excludeBegin||e.returnBegin)throw F("skip, excludeBegin, returnBegin not compatible with beginScope: {}"),j;if("object"!=typeof e.beginScope||null===e.beginScope)throw F("beginScope must be object"),j;K(e,e.begin,{key:"beginScope"}),e.begin=E(e.begin,{joinWith:""})}})(e),(e=>{if(Array.isArray(e.end)){if(e.skip||e.excludeEnd||e.returnEnd)throw F("skip, excludeEnd, returnEnd not compatible with endScope: {}"),j;if("object"!=typeof e.endScope||null===e.endScope)throw F("endScope must be object"),j;K(e,e.end,{key:"endScope"}),e.end=E(e.end,{joinWith:""})}})(e)}class H extends Error{constructor(e,n){super(e),this.name="HTMLInjectionError",this.html=n}}let Z=a,G=i,W=Symbol("nomatch");var Q=(n=>{let a=Object.create(null),r=Object.create(null),s=[],l=!0,o="Could not find the language '{}', did you forget to load/include a language module?",f={disableAutodetect:!0,name:"Plain text",contains:[]},$={ignoreUnescapedHTML:!1,throwUnescapedHTML:!1,noHighlightRe:/^(no-?highlight)$/i,languageDetectRe:/\blang(?:uage)?-([\w-]+)\b/i,classPrefix:"hljs-",cssSelector:"pre code",languages:null,__emitter:c};function y(e){return $.noHighlightRe.test(e)}function N(e,n,t){let a="",i="";"object"==typeof n?(a=e,t=n.ignoreIllegals,i=n.language):(P("10.7.0","highlight(lang, code, ...args) has been deprecated."),P("10.7.0","Please use highlight(code, options) instead.\nhttps://github.com/highlightjs/highlight.js/issues/2277"),i=e,a=n),void 0===t&&(t=!0);let r={code:a,language:i};z("before:highlight",r);let s=r.result?r.result:w(r.language,r.code,t);return s.code=r.code,z("after:highlight",s),s}function w(e,n,r,s){let c=Object.create(null);function g(){var e;if(!M.keywords)return void A.addText(C);let n=0;M.keywordPatternRe.lastIndex=0;let t=M.keywordPatternRe.exec(C),a="";for(;t;){a+=C.substring(n,t.index);let i=N.case_insensitive?t[0].toLowerCase():t[0],r=(e=i,M.keywords[e]);if(r){let[s,l]=r;if(A.addText(a),a="",c[i]=(c[i]||0)+1,c[i]<=7&&(z+=l),s.startsWith("_"))a+=t[0];else{let o=N.classNameAliases[s]||s;A.addKeyword(t[0],o)}}else a+=t[0];n=M.keywordPatternRe.lastIndex,t=M.keywordPatternRe.exec(C)}a+=C.substring(n),A.addText(a)}function u(){null!=M.subLanguage?(()=>{if(""===C)return;let e=null;if("string"==typeof M.subLanguage){if(!a[M.subLanguage])return void A.addText(C);e=w(M.subLanguage,C,!0,S[M.subLanguage]),S[M.subLanguage]=e._top}else e=v(C,M.subLanguage.length?M.subLanguage:null);M.relevance>0&&(z+=e.relevance),A.addSublanguage(e._emitter,e.language)})():g(),C=""}function b(e,n){let t=1,a=n.length-1;for(;t<=a;){if(!e._emit[t]){t++;continue}let i=N.classNameAliases[e[t]]||e[t],r=n[t];i?A.addKeyword(r,i):(C=r,g(),C=""),t++}}function m(e,n){return e.scope&&"string"==typeof e.scope&&A.openNode(N.classNameAliases[e.scope]||e.scope),e.beginScope&&(e.beginScope._wrap?(A.addKeyword(C,N.classNameAliases[e.beginScope._wrap]||e.beginScope._wrap),C=""):e.beginScope._multi&&(b(e.beginScope,n),C="")),M=Object.create(e,{parent:{value:M}})}function p(e){return 0===M.matcher.regexIndex?(C+=e[0],1):(j=!0,0)}let f={};function y(a,i){let s=i&&i[0];if(C+=a,null==s)return 
u(),0;if("begin"===f.type&&"end"===i.type&&f.index===i.index&&""===s){if(C+=n.slice(i.index,i.index+1),!l){let o=Error(`0 width match regex (${e})`);throw o.languageName=e,o.badRule=f.rule,o}return 1}if(f=i,"begin"===i.type)return(e=>{let n=e[0],a=e.rule,i=new t(a),r=[a.__beforeBegin,a["on:begin"]];for(let s of r)if(s&&(s(e,i),i.isMatchIgnored))return p(n);return a.skip?C+=n:(a.excludeBegin&&(C+=n),u(),a.returnBegin||a.excludeBegin||(C=n)),m(a,e),a.returnBegin?0:n.length})(i);if("illegal"===i.type&&!r){let c=Error('Illegal lexeme "'+s+'" for mode "'+(M.scope||"")+'"');throw c.mode=M,c}if("end"===i.type){let d=function e(a){let i=a[0],r=n.substring(a.index),s=function e(n,a,i){let r=((e,n)=>{let t=e&&e.exec(n);return t&&0===t.index})(n.endRe,i);if(r){if(n["on:end"]){let s=new t(n);n["on:end"](a,s),s.isMatchIgnored&&(r=!1)}if(r){for(;n.endsParent&&n.parent;)n=n.parent;return n}}if(n.endsWithParent)return e(n.parent,a,i)}(M,a,r);if(!s)return W;let l=M;M.endScope&&M.endScope._wrap?(u(),A.addKeyword(i,M.endScope._wrap)):M.endScope&&M.endScope._multi?(u(),b(M.endScope,a)):l.skip?C+=i:(l.returnEnd||l.excludeEnd||(C+=i),u(),l.excludeEnd&&(C=i));do M.scope&&A.closeNode(),M.skip||M.subLanguage||(z+=M.relevance),M=M.parent;while(M!==s.parent);return s.starts&&m(s.starts,a),l.returnEnd?0:i.length}(i);if(d!==W)return d}if("illegal"===i.type&&""===s)return 1;if(P>1e5&&P>3*i.index)throw Error("potential infinite loop, way more iterations than matches");return C+=s,s.length}let N=O(e);if(!N)throw F(o.replace("{}",e)),Error('Unknown language: "'+e+'"');let x=function e(n){function t(e,t){return RegExp(d(e),"m"+(n.case_insensitive?"i":"")+(n.unicodeRegex?"u":"")+(t?"g":""))}class a{constructor(){this.matchIndexes={},this.regexes=[],this.matchAt=1,this.position=0}addRule(e,n){n.position=this.position++,this.matchIndexes[this.matchAt]=n,this.regexes.push([n,e]),this.matchAt+=h(e)+1}compile(){0===this.regexes.length&&(this.exec=()=>null);let e=this.regexes.map(e=>e[1]);this.matcherRe=t(E(e,{joinWith:"|"}),!0),this.lastIndex=0}exec(e){this.matcherRe.lastIndex=this.lastIndex;let n=this.matcherRe.exec(e);if(!n)return null;let t=n.findIndex((e,n)=>n>0&&void 0!==e),a=this.matchIndexes[t];return n.splice(0,t),Object.assign(n,a)}}class r{constructor(){this.rules=[],this.multiRegexes=[],this.count=0,this.lastIndex=0,this.regexIndex=0}getMatcher(e){if(this.multiRegexes[e])return this.multiRegexes[e];let n=new a;return this.rules.slice(e).forEach(([e,t])=>n.addRule(e,t)),n.compile(),this.multiRegexes[e]=n,n}resumingScanAtSamePosition(){return 0!==this.regexIndex}considerAll(){this.regexIndex=0}addRule(e,n){this.rules.push([e,n]),"begin"===n.type&&this.count++}exec(e){let n=this.getMatcher(this.regexIndex);n.lastIndex=this.lastIndex;let t=n.exec(e);if(this.resumingScanAtSamePosition()){if(t&&t.index===this.lastIndex);else{let a=this.getMatcher(0);a.lastIndex=this.lastIndex+1,t=a.exec(e)}}return t&&(this.regexIndex+=t.position+1,this.regexIndex===this.count&&this.considerAll()),t}}if(n.compilerExtensions||(n.compilerExtensions=[]),n.contains&&n.contains.includes("self"))throw Error("ERR: contains `self` is not supported at the top-level of a language. 
See documentation.");return n.classNameAliases=i(n.classNameAliases||{}),function e(a,s){let l=a;if(a.isCompiled)return l;[T,I,q,B].forEach(e=>e(a,s)),n.compilerExtensions.forEach(e=>e(a,s)),a.__beforeBegin=null,[R,D,L].forEach(e=>e(a,s)),a.isCompiled=!0;let o=null;return"object"==typeof a.keywords&&a.keywords.$pattern&&(a.keywords=Object.assign({},a.keywords),o=a.keywords.$pattern,delete a.keywords.$pattern),o=o||/\w+/,a.keywords&&(a.keywords=function e(n,t,a="keyword"){let i=Object.create(null);return"string"==typeof n?r(a,n.split(" ")):Array.isArray(n)?r(a,n):Object.keys(n).forEach(a=>{Object.assign(i,e(n[a],t,a))}),i;function r(e,n){t&&(n=n.map(e=>e.toLowerCase())),n.forEach(n=>{var t,a,r;let s=n.split("|");i[s[0]]=[e,(t=s[0],a=s[1],a?Number(a):(r=t,_.includes(r.toLowerCase()))?0:1)]})}}(a.keywords,n.case_insensitive)),l.keywordPatternRe=t(o,!0),s&&(a.begin||(a.begin=/\B|\b/),l.beginRe=t(l.begin),a.end||a.endsWithParent||(a.end=/\B|\b/),a.end&&(l.endRe=t(l.end)),l.terminatorEnd=d(l.end)||"",a.endsWithParent&&s.terminatorEnd&&(l.terminatorEnd+=(a.end?"|":"")+s.terminatorEnd)),a.illegal&&(l.illegalRe=t(a.illegal)),a.contains||(a.contains=[]),a.contains=[].concat(...a.contains.map(e=>{var n;return(n="self"===e?a:e).variants&&!n.cachedVariants&&(n.cachedVariants=n.variants.map(e=>i(n,{variants:null},e))),n.cachedVariants?n.cachedVariants:!function e(n){return!!n&&(n.endsWithParent||e(n.starts))}(n)?Object.isFrozen(n)?i(n):n:i(n,{starts:n.starts?i(n.starts):null})})),a.contains.forEach(n=>{e(n,l)}),a.starts&&e(a.starts,s),l.matcher=(e=>{let n=new r;return e.contains.forEach(e=>n.addRule(e.begin,{rule:e,type:"begin"})),e.terminatorEnd&&n.addRule(e.terminatorEnd,{type:"end"}),e.illegal&&n.addRule(e.illegal,{type:"illegal"}),n})(l),l}(n)}(N),k="",M=s||x,S={},A=new $.__emitter($);(()=>{let e=[];for(let n=M;n!==N;n=n.parent)n.scope&&e.unshift(n.scope);e.forEach(e=>A.openNode(e))})();let C="",z=0,U=0,P=0,j=!1;try{for(M.matcher.considerAll();;){P++,j?j=!1:M.matcher.considerAll(),M.matcher.lastIndex=U;let K=M.matcher.exec(n);if(!K)break;let H=y(n.substring(U,K.index),K);U=K.index+H}return y(n.substring(U)),A.closeAllNodes(),A.finalize(),k=A.toHTML(),{language:e,value:k,relevance:z,illegal:!1,_emitter:A,_top:M}}catch(G){if(G.message&&G.message.includes("Illegal"))return{language:e,value:Z(n),illegal:!0,relevance:0,_illegalBy:{message:G.message,index:U,context:n.slice(U-100,U+100),mode:G.mode,resultSoFar:k},_emitter:A};if(l)return{language:e,value:Z(n),illegal:!1,relevance:0,errorRaised:G,_emitter:A,_top:M};throw G}}function v(e,n){n=n||$.languages||Object.keys(a);let t=(e=>{let n={value:Z(e),illegal:!1,relevance:0,_top:f,_emitter:new $.__emitter($)};return n._emitter.addText(e),n})(e),i=n.filter(O).filter(C).map(n=>w(n,e,!1));i.unshift(t);let r=i.sort((e,n)=>{if(e.relevance!==n.relevance)return n.relevance-e.relevance;if(e.language&&n.language){if(O(e.language).supersetOf===n.language)return 1;if(O(n.language).supersetOf===e.language)return -1}return 0}),[s,l]=r,o=s;return o.secondBest=l,o}function x(e){let n=null,t=(e=>{let n=e.className+" ";n+=e.parentNode?e.parentNode.className:"";let t=$.languageDetectRe.exec(n);if(t){let a=O(t[1]);return a||(U(o.replace("{}",t[1])),U("Falling back to no-highlight mode for this block.",e)),a?t[1]:"no-highlight"}return n.split(/\s+/).find(e=>y(e)||O(e))})(e);if(y(t))return;if(z("before:highlightElement",{el:e,language:t}),e.children.length>0&&($.ignoreUnescapedHTML||$.throwUnescapedHTML))throw new H("One of your code blocks includes unescaped 
HTML.",e.innerHTML);n=e;let a=n.textContent,i=t?N(a,{language:t,ignoreIllegals:!0}):v(a);e.innerHTML=i.value,((e,n,t)=>{let a=n&&r[n]||t;e.classList.add("hljs"),e.classList.add("language-"+a)})(e,t,i.language),e.result={language:i.language,re:i.relevance,relevance:i.relevance},i.secondBest&&(e.secondBest={language:i.secondBest.language,relevance:i.secondBest.relevance}),z("after:highlightElement",{el:e,result:i,text:a})}let k=!1;function M(){"loading"!==document.readyState?document.querySelectorAll($.cssSelector).forEach(x):k=!0}function O(e){return a[e=(e||"").toLowerCase()]||a[r[e]]}function S(e,{languageName:n}){"string"==typeof e&&(e=[e]),e.forEach(e=>{r[e.toLowerCase()]=n})}function C(e){let n=O(e);return n&&!n.disableAutodetect}function z(e,n){let t=e;s.forEach(e=>{e[t]&&e[t](n)})}for(let j in"undefined"!=typeof window&&window.addEventListener&&window.addEventListener("DOMContentLoaded",()=>{k&&M()},!1),Object.assign(n,{highlight:N,highlightAuto:v,highlightAll:M,highlightElement:x,highlightBlock:e=>(P("10.7.0","highlightBlock will be removed entirely in v12.0"),P("10.7.0","Please use highlightElement now."),x(e)),configure(e){$=G($,e)},initHighlighting(){M(),P("10.6.0","initHighlighting() deprecated. Use highlightAll() now.")},initHighlightingOnLoad(){M(),P("10.6.0","initHighlightingOnLoad() deprecated. Use highlightAll() now.")},registerLanguage(e,t){let i=null;try{i=t(n)}catch(r){if(F("Language definition for '{}' could not be registered.".replace("{}",e)),!l)throw r;F(r),i=f}i.name||(i.name=e),a[e]=i,i.rawDefinition=t.bind(null,n),i.aliases&&S(i.aliases,{languageName:e})},unregisterLanguage(e){for(let n of(delete a[e],Object.keys(r)))r[n]===e&&delete r[n]},listLanguages:()=>Object.keys(a),getLanguage:O,registerAliases:S,autoDetection:C,inherit:G,addPlugin(e){var n;(n=e)["before:highlightBlock"]&&!n["before:highlightElement"]&&(n["before:highlightElement"]=e=>{n["before:highlightBlock"](Object.assign({block:e.el},e))}),n["after:highlightBlock"]&&!n["after:highlightElement"]&&(n["after:highlightElement"]=e=>{n["after:highlightBlock"](Object.assign({block:e.el},e))}),s.push(e)}}),n.debugMode=()=>{l=!1},n.safeMode=()=>{l=!0},n.versionString="11.7.0",n.regex={concat:m,lookahead:g,either:p,optional:b,anyNumberOfTimes:u},A)"object"==typeof A[j]&&e.exports(A[j]);return Object.assign(n,A),n})({});let 
X=e=>({IMPORTANT:{scope:"meta",begin:"!important"},BLOCK_COMMENT:e.C_BLOCK_COMMENT_MODE,HEXCOLOR:{scope:"number",begin:/#(([0-9a-fA-F]{3,4})|(([0-9a-fA-F]{2}){3,4}))\b/},FUNCTION_DISPATCH:{className:"built_in",begin:/[\w-]+(?=\()/},ATTRIBUTE_SELECTOR_MODE:{scope:"selector-attr",begin:/\[/,end:/\]/,illegal:"$",contains:[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},CSS_NUMBER_MODE:{scope:"number",begin:e.NUMBER_RE+"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",relevance:0},CSS_VARIABLE:{className:"attr",begin:/--[A-Za-z][A-Za-z0-9_-]*/}}),V=["a","abbr","address","article","aside","audio","b","blockquote","body","button","canvas","caption","cite","code","dd","del","details","dfn","div","dl","dt","em","fieldset","figcaption","figure","footer","form","h1","h2","h3","h4","h5","h6","header","hgroup","html","i","iframe","img","input","ins","kbd","label","legend","li","main","mark","menu","nav","object","ol","p","q","quote","samp","section","span","strong","summary","sup","table","tbody","td","textarea","tfoot","th","thead","time","tr","ul","var","video",],J=["any-hover","any-pointer","aspect-ratio","color","color-gamut","color-index","device-aspect-ratio","device-height","device-width","display-mode","forced-colors","grid","height","hover","inverted-colors","monochrome","orientation","overflow-block","overflow-inline","pointer","prefers-color-scheme","prefers-contrast","prefers-reduced-motion","prefers-reduced-transparency","resolution","scan","scripting","update","width","min-width","max-width","min-height","max-height",],Y=["active","any-link","blank","checked","current","default","defined","dir","disabled","drop","empty","enabled","first","first-child","first-of-type","fullscreen","future","focus","focus-visible","focus-within","has","host","host-context","hover","indeterminate","in-range","invalid","is","lang","last-child","last-of-type","left","link","local-link","not","nth-child","nth-col","nth-last-child","nth-last-col","nth-last-of-type","nth-of-type","only-child","only-of-type","optional","out-of-range","past","placeholder-shown","read-only","read-write","required","right","root","scope","target","target-within","user-invalid","valid","visited","where",],ee=["after","backdrop","before","cue","cue-region","first-letter","first-line","grammar-error","marker","part","placeholder","selection","slotted","spelling-error",],en=["align-content","align-items","align-self","all","animation","animation-delay","animation-direction","animation-duration","animation-fill-mode","animation-iteration-count","animation-name","animation-play-state","animation-timing-function","backface-visibility","background","background-attachment","background-blend-mode","background-clip","background-color","background-image","background-origin","background-position","background-repeat","background-size","block-size","border","border-block","border-block-color","border-block-end","border-block-end-color","border-block-end-style","border-block-end-width","border-block-start","border-block-start-color","border-block-start-style","border-block-start-width","border-block-style","border-block-width","border-bottom","border-bottom-color","border-bottom-left-radius","border-bottom-right-radius","border-bottom-style","border-bottom-width","border-collapse","border-color","border-image","border-image-outset","border-image-repeat","border-image-slice","border-image-source","border-image-width","border-inline","border-inline-color","border-inline-end","border-inline-end-color","border-in
line-end-style","border-inline-end-width","border-inline-start","border-inline-start-color","border-inline-start-style","border-inline-start-width","border-inline-style","border-inline-width","border-left","border-left-color","border-left-style","border-left-width","border-radius","border-right","border-right-color","border-right-style","border-right-width","border-spacing","border-style","border-top","border-top-color","border-top-left-radius","border-top-right-radius","border-top-style","border-top-width","border-width","bottom","box-decoration-break","box-shadow","box-sizing","break-after","break-before","break-inside","caption-side","caret-color","clear","clip","clip-path","clip-rule","color","column-count","column-fill","column-gap","column-rule","column-rule-color","column-rule-style","column-rule-width","column-span","column-width","columns","contain","content","content-visibility","counter-increment","counter-reset","cue","cue-after","cue-before","cursor","direction","display","empty-cells","filter","flex","flex-basis","flex-direction","flex-flow","flex-grow","flex-shrink","flex-wrap","float","flow","font","font-display","font-family","font-feature-settings","font-kerning","font-language-override","font-size","font-size-adjust","font-smoothing","font-stretch","font-style","font-synthesis","font-variant","font-variant-caps","font-variant-east-asian","font-variant-ligatures","font-variant-numeric","font-variant-position","font-variation-settings","font-weight","gap","glyph-orientation-vertical","grid","grid-area","grid-auto-columns","grid-auto-flow","grid-auto-rows","grid-column","grid-column-end","grid-column-start","grid-gap","grid-row","grid-row-end","grid-row-start","grid-template","grid-template-areas","grid-template-columns","grid-template-rows","hanging-punctuation","height","hyphens","icon","image-orientation","image-rendering","image-resolution","ime-mode","inline-size","isolation","justify-content","left","letter-spacing","line-break","line-height","list-style","list-style-image","list-style-position","list-style-type","margin","margin-block","margin-block-end","margin-block-start","margin-bottom","margin-inline","margin-inline-end","margin-inline-start","margin-left","margin-right","margin-top","marks","mask","mask-border","mask-border-mode","mask-border-outset","mask-border-repeat","mask-border-slice","mask-border-source","mask-border-width","mask-clip","mask-composite","mask-image","mask-mode","mask-origin","mask-position","mask-repeat","mask-size","mask-type","max-block-size","max-height","max-inline-size","max-width","min-block-size","min-height","min-inline-size","min-width","mix-blend-mode","nav-down","nav-index","nav-left","nav-right","nav-up","none","normal","object-fit","object-position","opacity","order","orphans","outline","outline-color","outline-offset","outline-style","outline-width","overflow","overflow-wrap","overflow-x","overflow-y","padding","padding-block","padding-block-end","padding-block-start","padding-bottom","padding-inline","padding-inline-end","padding-inline-start","padding-left","padding-right","padding-top","page-break-after","page-break-before","page-break-inside","pause","pause-after","pause-before","perspective","perspective-origin","pointer-events","position","quotes","resize","rest","rest-after","rest-before","right","row-gap","scroll-margin","scroll-margin-block","scroll-margin-block-end","scroll-margin-block-start","scroll-margin-bottom","scroll-margin-inline","scroll-margin-inline-end","scroll-margin-inline-start","scroll-margin-left","
scroll-margin-right","scroll-margin-top","scroll-padding","scroll-padding-block","scroll-padding-block-end","scroll-padding-block-start","scroll-padding-bottom","scroll-padding-inline","scroll-padding-inline-end","scroll-padding-inline-start","scroll-padding-left","scroll-padding-right","scroll-padding-top","scroll-snap-align","scroll-snap-stop","scroll-snap-type","scrollbar-color","scrollbar-gutter","scrollbar-width","shape-image-threshold","shape-margin","shape-outside","speak","speak-as","src","tab-size","table-layout","text-align","text-align-all","text-align-last","text-combine-upright","text-decoration","text-decoration-color","text-decoration-line","text-decoration-style","text-emphasis","text-emphasis-color","text-emphasis-position","text-emphasis-style","text-indent","text-justify","text-orientation","text-overflow","text-rendering","text-shadow","text-transform","text-underline-position","top","transform","transform-box","transform-origin","transform-style","transition","transition-delay","transition-duration","transition-property","transition-timing-function","unicode-bidi","vertical-align","visibility","voice-balance","voice-duration","voice-family","voice-pitch","voice-range","voice-rate","voice-stress","voice-volume","white-space","widows","width","will-change","word-break","word-spacing","word-wrap","writing-mode","z-index",].reverse(),et=Y.concat(ee);var ea="\\.([0-9](_*[0-9])*)",ei="[0-9a-fA-F](_*[0-9a-fA-F])*",er={className:"number",variants:[{begin:`(\\b([0-9](_*[0-9])*)((${ea})|\\.)?|(${ea}))[eE][+-]?([0-9](_*[0-9])*)[fFdD]?\\b`},{begin:`\\b([0-9](_*[0-9])*)((${ea})[fFdD]?\\b|\\.([fFdD]\\b)?)`},{begin:`(${ea})[fFdD]?\\b`},{begin:"\\b([0-9](_*[0-9])*)[fFdD]\\b"},{begin:`\\b0[xX]((${ei})\\.?|(${ei})?\\.(${ei}))[pP][+-]?([0-9](_*[0-9])*)[fFdD]?\\b`},{begin:"\\b(0|[1-9](_*[0-9])*)[lL]?\\b"},{begin:`\\b0[xX](${ei})[lL]?\\b`},{begin:"\\b0(_*[0-7])*[lL]?\\b"},{begin:"\\b0[bB][01](_*[01])*[lL]?\\b"},],relevance:0};let es="[A-Za-z$_][0-9A-Za-z$_]*",el=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends",],eo=["true","false","null","undefined","NaN","Infinity"],ec=["Object","Function","Boolean","Symbol","Math","Date","Number","BigInt","String","RegExp","Array","Float32Array","Float64Array","Int8Array","Uint8Array","Uint8ClampedArray","Int16Array","Int32Array","Uint16Array","Uint32Array","BigInt64Array","BigUint64Array","Set","Map","WeakSet","WeakMap","ArrayBuffer","SharedArrayBuffer","Atomics","DataView","JSON","Promise","Generator","GeneratorFunction","AsyncFunction","Reflect","Proxy","Intl","WebAssembly",],ed=["Error","EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError",],eg=["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape",],eu=["arguments","this","super","console","window","document","localStorage","module","global",],eb=[].concat(eg,ec,ed);function em(e){var n;let t=e.regex,a=es,i={begin:/<[A-Za-z0-9\\._:-]+/,end:/\/[A-Za-z0-9\\._:-]+>|\/>/,isTrulyOpeningTag(e,n){let t=e[0].length+e.index,a=e.input[t];if("<"===a||","===a)return void n.ignoreMatch();let i;">"===a&&(((e,{after:n})=>{let 
t="",v={match:[/const|var|let/,/\s+/,a,/\s*/,/=\s*/,/(async\s*)?/,t.lookahead(w),],keywords:"async",className:{1:"keyword",3:"title.function"},contains:[f]};return{name:"Javascript",aliases:["js","jsx","mjs","cjs"],keywords:r,exports:{PARAMS_CONTAINS:h,CLASS_REFERENCE:$},illegal:/#(?![$_A-z])/,contains:[e.SHEBANG({label:"shebang",binary:"node",relevance:5}),{label:"use_strict",className:"meta",relevance:10,begin:/^\s*['"]use (strict|asm)['"]/},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,d,g,u,b,{match:/\$\d+/},o,$,{className:"attr",begin:a+t.lookahead(":"),relevance:0},v,{begin:"("+e.RE_STARTERS_RE+"|\\b(case|return|throw)\\b)\\s*",keywords:"return throw case",relevance:0,contains:[b,e.REGEXP_MODE,{className:"function",begin:w,returnBegin:!0,end:"\\s*=>",contains:[{className:"params",variants:[{begin:e.UNDERSCORE_IDENT_RE,relevance:0},{className:null,begin:/\(\s*\)/,skip:!0},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:r,contains:h},]},]},{begin:/,/,relevance:0},{match:/\s+/,relevance:0},{variants:[{begin:"<>",end:""},{match:/<[A-Za-z0-9\\._:-]+\s*\/>/},{begin:i.begin,"on:begin":i.isTrulyOpeningTag,end:i.end},],subLanguage:"xml",contains:[{begin:i.begin,end:i.end,skip:!0,contains:["self"]},]},]},{variants:[{match:[/function/,/\s+/,a,/(?=\s*\()/]},{match:[/function/,/\s*(?=\()/]},],className:{1:"keyword",3:"title.function"},label:"func.def",contains:[f],illegal:/%/},{beginKeywords:"while if switch catch for"},{begin:"\\b(?!function)"+e.UNDERSCORE_IDENT_RE+"\\([^()]*(\\([^()]*(\\([^()]*\\)[^()]*)*\\)[^()]*)*\\)\\s*\\{",returnBegin:!0,label:"func.def",contains:[f,e.inherit(e.TITLE_MODE,{begin:a,className:"title.function"}),]},{match:/\.\.\./,relevance:0},N,{match:"\\$"+a,relevance:0},{match:[/\bconstructor(?=\s*\()/],className:{1:"title.function"},contains:[f]},y,{relevance:0,match:/\b[A-Z][A-Z_0-9]+\b/,className:"variable.constant"},E,{match:[/get|set/,/\s+/,a,/(?=\()/],className:{1:"keyword",3:"title.function"},contains:[{begin:/\(\)/},f]},{match:/\$[(.]/},]}}let 
ep=e=>m(/\b/,e,/\w$/.test(e)?/\b/:/\B/),e8=["Protocol","Type"].map(ep),eh=["init","self"].map(ep),ef=["Any","Self"],eE=["actor","any","associatedtype","async","await",/as\?/,/as!/,"as","break","case","catch","class","continue","convenience","default","defer","deinit","didSet","distributed","do","dynamic","else","enum","extension","fallthrough",/fileprivate\(set\)/,"fileprivate","final","for","func","get","guard","if","import","indirect","infix",/init\?/,/init!/,"inout",/internal\(set\)/,"internal","in","is","isolated","nonisolated","lazy","let","mutating","nonmutating",/open\(set\)/,"open","operator","optional","override","postfix","precedencegroup","prefix",/private\(set\)/,"private","protocol",/public\(set\)/,"public","repeat","required","rethrows","return","set","some","static","struct","subscript","super","switch","throws","throw",/try\?/,/try!/,"try","typealias",/unowned\(safe\)/,/unowned\(unsafe\)/,"unowned","var","weak","where","while","willSet",],e$=["false","nil","true"],ey=["assignment","associativity","higherThan","left","lowerThan","none","right",],eN=["#colorLiteral","#column","#dsohandle","#else","#elseif","#endif","#error","#file","#fileID","#fileLiteral","#filePath","#function","#if","#imageLiteral","#keyPath","#line","#selector","#sourceLocation","#warn_unqualified_access","#warning",],ew=["abs","all","any","assert","assertionFailure","debugPrint","dump","fatalError","getVaList","isKnownUniquelyReferenced","max","min","numericCast","pointwiseMax","pointwiseMin","precondition","preconditionFailure","print","readLine","repeatElement","sequence","stride","swap","swift_unboxFromSwiftValueWithType","transcode","type","unsafeBitCast","unsafeDowncast","withExtendedLifetime","withUnsafeMutablePointer","withUnsafePointer","withVaList","withoutActuallyEscaping","zip",],ev=p(/[/=\-+!*%<>&|^~?]/,/[\u00A1-\u00A7]/,/[\u00A9\u00AB]/,/[\u00AC\u00AE]/,/[\u00B0\u00B1]/,/[\u00B6\u00BB\u00BF\u00D7\u00F7]/,/[\u2016-\u2017]/,/[\u2020-\u2027]/,/[\u2030-\u203E]/,/[\u2041-\u2053]/,/[\u2055-\u205E]/,/[\u2190-\u23FF]/,/[\u2500-\u2775]/,/[\u2794-\u2BFF]/,/[\u2E00-\u2E7F]/,/[\u3001-\u3003]/,/[\u3008-\u3020]/,/[\u3030]/),ex=p(ev,/[\u0300-\u036F]/,/[\u1DC0-\u1DFF]/,/[\u20D0-\u20FF]/,/[\uFE00-\uFE0F]/,/[\uFE20-\uFE2F]/),ek=m(ev,ex,"*"),eM=p(/[a-zA-Z_]/,/[\u00A8\u00AA\u00AD\u00AF\u00B2-\u00B5\u00B7-\u00BA]/,/[\u00BC-\u00BE\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF]/,/[\u0100-\u02FF\u0370-\u167F\u1681-\u180D\u180F-\u1DBF]/,/[\u1E00-\u1FFF]/,/[\u200B-\u200D\u202A-\u202E\u203F-\u2040\u2054\u2060-\u206F]/,/[\u2070-\u20CF\u2100-\u218F\u2460-\u24FF\u2776-\u2793]/,/[\u2C00-\u2DFF\u2E80-\u2FFF]/,/[\u3004-\u3007\u3021-\u302F\u3031-\u303F\u3040-\uD7FF]/,/[\uF900-\uFD3D\uFD40-\uFDCF\uFDF0-\uFE1F\uFE30-\uFE44]/,/[\uFE47-\uFEFE\uFF00-\uFFFD]/),eO=p(eM,/\d/,/[\u0300-\u036F\u1DC0-\u1DFF\u20D0-\u20FF\uFE20-\uFE2F]/),eS=m(eM,eO,"*"),eA=m(/[A-Z]/,eO,"*"),eC=["autoclosure",m(/convention\(/,p("swift","block","c"),/\)/),"discardableResult","dynamicCallable","dynamicMemberLookup","escaping","frozen","GKInspectable","IBAction","IBDesignable","IBInspectable","IBOutlet","IBSegueAction","inlinable","main","nonobjc","NSApplicationMain","NSCopying","NSManaged",m(/objc\(/,eS,/\)/),"objc","objcMembers","propertyWrapper","requires_stored_property_inits","resultBuilder","testable","UIApplicationMain","unknown","usableFromInline",],eT=["iOS","iOSApplicationExtension","macOS","macOSApplicationExtension","macCatalyst","macCatalystApplicationExtension","watchOS","watchOSApplicationExtension","tvOS","tvOSApplicationExtension","swift",];var 
eR=Object.freeze({__proto__:null,grmr_bash(e){let n=e.regex,t={};Object.assign(t,{className:"variable",variants:[{begin:n.concat(/\$[\w\d#@][\w\d_]*/,"(?![\\w\\d])(?![$])")},{begin:/\$\{/,end:/\}/,contains:["self",{begin:/:-/,contains:[t]}]},]});let a={className:"subst",begin:/\$\(/,end:/\)/,contains:[e.BACKSLASH_ESCAPE]},i={begin:/<<-?\s*(?=\w+)/,starts:{contains:[e.END_SAME_AS_BEGIN({begin:/(\w+)/,end:/(\w+)/,className:"string"}),]}},r={className:"string",begin:/"/,end:/"/,contains:[e.BACKSLASH_ESCAPE,t,a]};a.contains.push(r);let s={begin:/\$?\(\(/,end:/\)\)/,contains:[{begin:/\d+#[0-9a-f]+/,className:"number"},e.NUMBER_MODE,t,]},l=e.SHEBANG({binary:"(fish|bash|zsh|sh|csh|ksh|tcsh|dash|scsh)",relevance:10}),o={className:"function",begin:/\w[\w\d_]*\s*\(\s*\)\s*\{/,returnBegin:!0,contains:[e.inherit(e.TITLE_MODE,{begin:/\w[\w\d_]*/})],relevance:0};return{name:"Bash",aliases:["sh"],keywords:{$pattern:/\b[a-z][a-z0-9._-]+\b/,keyword:["if","then","else","elif","fi","for","while","in","do","done","case","esac","function",],literal:["true","false"],built_in:["break","cd","continue","eval","exec","exit","export","getopts","hash","pwd","readonly","return","shift","test","times","trap","umask","unset","alias","bind","builtin","caller","command","declare","echo","enable","help","let","local","logout","mapfile","printf","read","readarray","source","type","typeset","ulimit","unalias","set","shopt","autoload","bg","bindkey","bye","cap","chdir","clone","comparguments","compcall","compctl","compdescribe","compfiles","compgroups","compquote","comptags","comptry","compvalues","dirs","disable","disown","echotc","echoti","emulate","fc","fg","float","functions","getcap","getln","history","integer","jobs","kill","limit","log","noglob","popd","print","pushd","pushln","rehash","sched","setcap","setopt","stat","suspend","ttyctl","unfunction","unhash","unlimit","unsetopt","vared","wait","whence","where","which","zcompile","zformat","zftp","zle","zmodload","zparseopts","zprof","zpty","zregexparse","zsocket","zstyle","ztcp","chcon","chgrp","chown","chmod","cp","dd","df","dir","dircolors","ln","ls","mkdir","mkfifo","mknod","mktemp","mv","realpath","rm","rmdir","shred","sync","touch","truncate","vdir","b2sum","base32","base64","cat","cksum","comm","csplit","cut","expand","fmt","fold","head","join","md5sum","nl","numfmt","od","paste","ptx","pr","sha1sum","sha224sum","sha256sum","sha384sum","sha512sum","shuf","sort","split","sum","tac","tail","tr","tsort","unexpand","uniq","wc","arch","basename","chroot","date","dirname","du","echo","env","expr","factor","groups","hostid","id","link","logname","nice","nohup","nproc","pathchk","pinky","printenv","printf","pwd","readlink","runcon","seq","sleep","stat","stdbuf","stty","tee","test","timeout","tty","uname","unlink","uptime","users","who","whoami","yes",]},contains:[l,e.SHEBANG(),o,s,e.HASH_COMMENT_MODE,i,{match:/(\/[a-z._-]+)+/},r,{className:"",begin:/\\"/},{className:"string",begin:/'/,end:/'/},t,]}},grmr_c(e){let n=e.regex,t=e.COMMENT("//","$",{contains:[{begin:/\\\n/}]}),a="[a-zA-Z_]\\w*::",i="(decltype\\(auto\\)|"+n.optional(a)+"[a-zA-Z_]\\w*"+n.optional("<[^<>]+>")+")",r={className:"type",variants:[{begin:"\\b[a-z\\d_]*_t\\b"},{match:/\batomic_[a-z]{3,6}\b/},]},s={className:"string",variants:[{begin:'(u8?|U|L)?"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:"(u8?|U|L)?'(\\\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4,8}|[0-7]{3}|\\S)|.)",end:"'",illegal:"."},e.END_SAME_AS_BEGIN({begin:/(?:u8?|U|L)?R"([^()\\ ]{0,16})\(/,end:/\)([^()\\ 
]{0,16})"/}),]},l={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)((ll|LL|l|L)(u|U)?|(u|U)(ll|LL|l|L)?|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"},],relevance:0},o={className:"meta",begin:/#\s*[a-z]+\b/,end:/$/,keywords:{keyword:"if else elif endif define undef warning error line pragma _Pragma ifdef ifndef include"},contains:[{begin:/\\\n/,relevance:0},e.inherit(s,{className:"string"}),{className:"string",begin:/<.*?>/},t,e.C_BLOCK_COMMENT_MODE,]},c={className:"title",begin:n.optional(a)+e.IDENT_RE,relevance:0},d=n.optional(a)+e.IDENT_RE+"\\s*\\(",g={keyword:["asm","auto","break","case","continue","default","do","else","enum","extern","for","fortran","goto","if","inline","register","restrict","return","sizeof","struct","switch","typedef","union","volatile","while","_Alignas","_Alignof","_Atomic","_Generic","_Noreturn","_Static_assert","_Thread_local","alignas","alignof","noreturn","static_assert","thread_local","_Pragma",],type:["float","double","signed","unsigned","int","short","long","char","void","_Bool","_Complex","_Imaginary","_Decimal32","_Decimal64","_Decimal128","const","static","complex","bool","imaginary",],literal:"true false NULL",built_in:"std string wstring cin cout cerr clog stdin stdout stderr stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set pair bitset multiset multimap unordered_set unordered_map unordered_multiset unordered_multimap priority_queue make_pair array shared_ptr abort terminate abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf future isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc realloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf endl initializer_list unique_ptr"},u=[o,r,t,e.C_BLOCK_COMMENT_MODE,l,s],b={variants:[{begin:/=/,end:/;/},{begin:/\(/,end:/\)/},{beginKeywords:"new throw return else",end:/;/},],keywords:g,contains:u.concat([{begin:/\(/,end:/\)/,keywords:g,contains:u.concat(["self"]),relevance:0},]),relevance:0},m={begin:"("+i+"[\\*&\\s]+)+"+d,returnBegin:!0,end:/[{;=]/,excludeEnd:!0,keywords:g,illegal:/[^\w\s\*&:<>.]/,contains:[{begin:"decltype\\(auto\\)",keywords:g,relevance:0},{begin:d,returnBegin:!0,contains:[e.inherit(c,{className:"title.function"}),],relevance:0},{relevance:0,match:/,/},{className:"params",begin:/\(/,end:/\)/,keywords:g,relevance:0,contains:[t,e.C_BLOCK_COMMENT_MODE,s,l,r,{begin:/\(/,end:/\)/,keywords:g,relevance:0,contains:["self",t,e.C_BLOCK_COMMENT_MODE,s,l,r]},]},r,t,e.C_BLOCK_COMMENT_MODE,o,]};return{name:"C",aliases:["h"],keywords:g,disableAutodetect:!0,illegal:"=]/,contains:[{beginKeywords:"final class struct"},e.TITLE_MODE,]},]),exports:{preprocessor:o,strings:s,keywords:g}}},grmr_cpp(e){let 
n=e.regex,t=e.COMMENT("//","$",{contains:[{begin:/\\\n/}]}),a="[a-zA-Z_]\\w*::",i="(?!struct)(decltype\\(auto\\)|"+n.optional(a)+"[a-zA-Z_]\\w*"+n.optional("<[^<>]+>")+")",r={className:"type",begin:"\\b[a-z\\d_]*_t\\b"},s={className:"string",variants:[{begin:'(u8?|U|L)?"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:"(u8?|U|L)?'(\\\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4,8}|[0-7]{3}|\\S)|.)",end:"'",illegal:"."},e.END_SAME_AS_BEGIN({begin:/(?:u8?|U|L)?R"([^()\\ ]{0,16})\(/,end:/\)([^()\\ ]{0,16})"/}),]},l={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)((ll|LL|l|L)(u|U)?|(u|U)(ll|LL|l|L)?|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"},],relevance:0},o={className:"meta",begin:/#\s*[a-z]+\b/,end:/$/,keywords:{keyword:"if else elif endif define undef warning error line pragma _Pragma ifdef ifndef include"},contains:[{begin:/\\\n/,relevance:0},e.inherit(s,{className:"string"}),{className:"string",begin:/<.*?>/},t,e.C_BLOCK_COMMENT_MODE,]},c={className:"title",begin:n.optional(a)+e.IDENT_RE,relevance:0},d=n.optional(a)+e.IDENT_RE+"\\s*\\(",g={type:["bool","char","char16_t","char32_t","char8_t","double","float","int","long","short","void","wchar_t","unsigned","signed","const","static",],keyword:["alignas","alignof","and","and_eq","asm","atomic_cancel","atomic_commit","atomic_noexcept","auto","bitand","bitor","break","case","catch","class","co_await","co_return","co_yield","compl","concept","const_cast|10","consteval","constexpr","constinit","continue","decltype","default","delete","do","dynamic_cast|10","else","enum","explicit","export","extern","false","final","for","friend","goto","if","import","inline","module","mutable","namespace","new","noexcept","not","not_eq","nullptr","operator","or","or_eq","override","private","protected","public","reflexpr","register","reinterpret_cast|10","requires","return","sizeof","static_assert","static_cast|10","struct","switch","synchronized","template","this","thread_local","throw","transaction_safe","transaction_safe_dynamic","true","try","typedef","typeid","typename","union","using","virtual","volatile","while","xor","xor_eq",],literal:["NULL","false","nullopt","nullptr","true"],built_in:["_Pragma"],_type_hints:["any","auto_ptr","barrier","binary_semaphore","bitset","complex","condition_variable","condition_variable_any","counting_semaphore","deque","false_type","future","imaginary","initializer_list","istringstream","jthread","latch","lock_guard","multimap","multiset","mutex","optional","ostringstream","packaged_task","pair","promise","priority_queue","queue","recursive_mutex","recursive_timed_mutex","scoped_lock","set","shared_future","shared_lock","shared_mutex","shared_timed_mutex","shared_ptr","stack","string_view","stringstream","timed_mutex","thread","true_type","tuple","unique_lock","unique_ptr","unordered_map","unordered_multimap","unordered_multiset","unordered_set","variant","vector","weak_ptr","wstring","wstring_view",]},u={className:"function.dispatch",relevance:0,keywords:{_hint:["abort","abs","acos","apply","as_const","asin","atan","atan2","calloc","ceil","cerr","cin","clog","cos","cosh","cout","declval","endl","exchange","exit","exp","fabs","floor","fmod","forward","fprintf","fputs","free","frexp","fscanf","future","invoke","isalnum","isalpha","iscntrl","isdigit","isgraph","islower","isprint","ispunct","isspace","isupper","isxdigit","labs","launder","ldexp","log","log10","make_pair","make_shared","make_shared_for_overwrite",
"make_tuple","make_unique","malloc","memchr","memcmp","memcpy","memset","modf","move","pow","printf","putchar","puts","realloc","scanf","sin","sinh","snprintf","sprintf","sqrt","sscanf","std","stderr","stdin","stdout","strcat","strchr","strcmp","strcpy","strcspn","strlen","strncat","strncmp","strncpy","strpbrk","strrchr","strspn","strstr","swap","tan","tanh","terminate","to_underlying","tolower","toupper","vfprintf","visit","vprintf","vsprintf",]},begin:n.concat(/\b/,/(?!decltype)/,/(?!if)/,/(?!for)/,/(?!switch)/,/(?!while)/,e.IDENT_RE,n.lookahead(/(<[^<>]+>|)\s*\(/))},b=[u,o,r,t,e.C_BLOCK_COMMENT_MODE,l,s],m={variants:[{begin:/=/,end:/;/},{begin:/\(/,end:/\)/},{beginKeywords:"new throw return else",end:/;/},],keywords:g,contains:b.concat([{begin:/\(/,end:/\)/,keywords:g,contains:b.concat(["self"]),relevance:0},]),relevance:0},p={className:"function",begin:"("+i+"[\\*&\\s]+)+"+d,returnBegin:!0,end:/[{;=]/,excludeEnd:!0,keywords:g,illegal:/[^\w\s\*&:<>.]/,contains:[{begin:"decltype\\(auto\\)",keywords:g,relevance:0},{begin:d,returnBegin:!0,contains:[c],relevance:0},{begin:/::/,relevance:0},{begin:/:/,endsWithParent:!0,contains:[s,l]},{relevance:0,match:/,/},{className:"params",begin:/\(/,end:/\)/,keywords:g,relevance:0,contains:[t,e.C_BLOCK_COMMENT_MODE,s,l,r,{begin:/\(/,end:/\)/,keywords:g,relevance:0,contains:["self",t,e.C_BLOCK_COMMENT_MODE,s,l,r]},]},r,t,e.C_BLOCK_COMMENT_MODE,o,]};return{name:"C++",aliases:["cc","c++","h++","hpp","hh","hxx","cxx"],keywords:g,illegal:"",keywords:g,contains:["self",r]},{begin:e.IDENT_RE+"::",keywords:g},{match:[/\b(?:enum(?:\s+(?:class|struct))?|class|struct|union)/,/\s+/,/\w+/,],className:{1:"keyword",3:"title.class"}},])}},grmr_csharp(e){let n={keyword:["abstract","as","base","break","case","catch","class","const","continue","do","else","event","explicit","extern","finally","fixed","for","foreach","goto","if","implicit","in","interface","internal","is","lock","namespace","new","operator","out","override","params","private","protected","public","readonly","record","ref","return","scoped","sealed","sizeof","stackalloc","static","struct","switch","this","throw","try","typeof","unchecked","unsafe","using","virtual","void","volatile","while",].concat(["add","alias","and","ascending","async","await","by","descending","equals","from","get","global","group","init","into","join","let","nameof","not","notnull","on","or","orderby","partial","remove","select","set","unmanaged","value|0","var","when","where","with","yield",]),built_in:["bool","byte","char","decimal","delegate","double","dynamic","enum","float","int","long","nint","nuint","object","sbyte","short","string","ulong","uint","ushort",],literal:["default","false","null","true"]},t=e.inherit(e.TITLE_MODE,{begin:"[a-zA-Z](\\.?\\w)*"}),a={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"},],relevance:0},i={className:"string",begin:'@"',end:'"',contains:[{begin:'""'}]},r=e.inherit(i,{illegal:/\n/}),s={className:"subst",begin:/\{/,end:/\}/,keywords:n},l=e.inherit(s,{illegal:/\n/}),o={className:"string",begin:/\$"/,end:'"',illegal:/\n/,contains:[{begin:/\{\{/},{begin:/\}\}/},e.BACKSLASH_ESCAPE,l,]},c={className:"string",begin:/\$@"/,end:'"',contains:[{begin:/\{\{/},{begin:/\}\}/},{begin:'""'},s,]},d=e.inherit(c,{illegal:/\n/,contains:[{begin:/\{\{/},{begin:/\}\}/},{begin:'""'},l]});s.contains=[c,o,i,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.C_B
LOCK_COMMENT_MODE,],l.contains=[d,o,r,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.inherit(e.C_BLOCK_COMMENT_MODE,{illegal:/\n/}),];let g={variants:[c,o,i,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},u={begin:"<",end:">",contains:[{beginKeywords:"in out"},t]},b=e.IDENT_RE+"(<"+e.IDENT_RE+"(\\s*,\\s*"+e.IDENT_RE+")*>)?(\\[\\])?",m={begin:"@"+e.IDENT_RE,relevance:0};return{name:"C#",aliases:["cs","c#"],keywords:n,illegal:/::/,contains:[e.COMMENT("///","$",{returnBegin:!0,contains:[{className:"doctag",variants:[{begin:"///",relevance:0},{begin:""},{begin:""},]},]}),e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"meta",begin:"#",end:"$",keywords:{keyword:"if else elif endif define undef warning error line region endregion pragma checksum"}},g,a,{beginKeywords:"class interface",relevance:0,end:/[{;=]/,illegal:/[^\s:,]/,contains:[{beginKeywords:"where class"},t,u,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,]},{beginKeywords:"namespace",relevance:0,end:/[{;=]/,illegal:/[^\s:]/,contains:[t,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{beginKeywords:"record",relevance:0,end:/[{;=]/,illegal:/[^\s:]/,contains:[t,u,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"meta",begin:"^\\s*\\[(?=[\\w])",excludeBegin:!0,end:"\\]",excludeEnd:!0,contains:[{className:"string",begin:/"/,end:/"/},]},{beginKeywords:"new return throw await else",relevance:0},{className:"function",begin:"("+b+"\\s+)+"+e.IDENT_RE+"\\s*(<[^=]+>\\s*)?\\(",returnBegin:!0,end:/\s*[{;=]/,excludeEnd:!0,keywords:n,contains:[{beginKeywords:"public private protected static internal protected abstract async extern override unsafe virtual new sealed partial",relevance:0},{begin:e.IDENT_RE+"\\s*(<[^=]+>\\s*)?\\(",returnBegin:!0,contains:[e.TITLE_MODE,u],relevance:0},{match:/\(\)/},{className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:n,relevance:0,contains:[g,a,e.C_BLOCK_COMMENT_MODE]},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,]},m,]}},grmr_css(e){let n=e.regex,t=X(e),a=[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE];return{name:"CSS",case_insensitive:!0,illegal:/[=|'\$]/,keywords:{keyframePosition:"from to"},classNameAliases:{keyframePosition:"selector-tag"},contains:[t.BLOCK_COMMENT,{begin:/-(webkit|moz|ms|o)-(?=[a-z])/},t.CSS_NUMBER_MODE,{className:"selector-id",begin:/#[A-Za-z0-9_-]+/,relevance:0},{className:"selector-class",begin:"\\.[a-zA-Z-][a-zA-Z0-9_-]*",relevance:0},t.ATTRIBUTE_SELECTOR_MODE,{className:"selector-pseudo",variants:[{begin:":("+Y.join("|")+")"},{begin:":(:)?("+ee.join("|")+")"},]},t.CSS_VARIABLE,{className:"attribute",begin:"\\b("+en.join("|")+")\\b"},{begin:/:/,end:/[;}{]/,contains:[t.BLOCK_COMMENT,t.HEXCOLOR,t.IMPORTANT,t.CSS_NUMBER_MODE,...a,{begin:/(url|data-uri)\(/,end:/\)/,relevance:0,keywords:{built_in:"url data-uri"},contains:[...a,{className:"string",begin:/[^)]/,endsWithParent:!0,excludeEnd:!0},]},t.FUNCTION_DISPATCH,]},{begin:n.lookahead(/@/),end:"[{;]",relevance:0,illegal:/:/,contains:[{className:"keyword",begin:/@-?\w[\w]*(-\w+)*/},{begin:/\s/,endsWithParent:!0,excludeEnd:!0,relevance:0,keywords:{$pattern:/[a-z-]+/,keyword:"and or not only",attribute:J.join(" ")},contains:[{begin:/[a-z-]+(?=:)/,className:"attribute"},...a,t.CSS_NUMBER_MODE,]},]},{className:"selector-tag",begin:"\\b("+V.join("|")+")\\b"},]}},grmr_diff(e){let n=e.regex;return{name:"Diff",aliases:["patch"],contains:[{className:"meta",relevance:10,match:n.either(/^@@ +-\d+,\d+ +\+\d+,\d+ +@@/,/^\*\*\* +\d+,\d+ +\*\*\*\*$/,/^--- +\d+,\d+ +----$/)},{className:"comment",variants:[{begin:n.either(/Index: 
/,/^index/,/={3,}/,/^-{3}/,/^\*{3} /,/^\+{3}/,/^diff --git/),end:/$/},{match:/^\*{15}$/},]},{className:"addition",begin:/^\+/,end:/$/},{className:"deletion",begin:/^-/,end:/$/},{className:"addition",begin:/^!/,end:/$/},]}},grmr_go(e){let n={keyword:["break","case","chan","const","continue","default","defer","else","fallthrough","for","func","go","goto","if","import","interface","map","package","range","return","select","struct","switch","type","var",],type:["bool","byte","complex64","complex128","error","float32","float64","int8","int16","int32","int64","string","uint8","uint16","uint32","uint64","int","uint","uintptr","rune",],literal:["true","false","iota","nil"],built_in:["append","cap","close","complex","copy","imag","len","make","new","panic","print","println","real","recover","delete",]};return{name:"Go",aliases:["golang"],keywords:n,illegal:"e(n,t,a-1))}("(?:<"+t+"~~~(?:\\s*,\\s*"+t+"~~~)*>)?",/~~~/g,2),i={keyword:["synchronized","abstract","private","var","static","if","const ","for","while","strictfp","finally","protected","import","native","final","void","enum","else","break","transient","catch","instanceof","volatile","case","assert","package","default","public","try","switch","continue","throws","protected","public","private","module","requires","exports","do","sealed","yield","permits",],literal:["false","true","null"],type:["char","boolean","long","float","int","byte","short","double",],built_in:["super","this"]},r={className:"meta",begin:"@"+t,contains:[{begin:/\(/,end:/\)/,contains:["self"]},]},s={className:"params",begin:/\(/,end:/\)/,keywords:i,relevance:0,contains:[e.C_BLOCK_COMMENT_MODE],endsParent:!0};return{name:"Java",aliases:["jsp"],keywords:i,illegal:/<\/|#/,contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{begin:/\w+@/,relevance:0},{className:"doctag",begin:"@[A-Za-z]+"},]}),{begin:/import java\.[a-z]+\./,keywords:"import",relevance:2},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{begin:/"""/,end:/"""/,className:"string",contains:[e.BACKSLASH_ESCAPE]},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{match:[/\b(?:class|interface|enum|extends|implements|new)/,/\s+/,t,],className:{1:"keyword",3:"title.class"}},{match:/non-sealed/,scope:"keyword"},{begin:[n.concat(/(?!else)/,t),/\s+/,t,/\s+/,/=(?!=)/],className:{1:"type",3:"variable",5:"operator"}},{begin:[/record/,/\s+/,t],className:{1:"keyword",3:"title.class"},contains:[s,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{beginKeywords:"new throw return else",relevance:0},{begin:["(?:"+a+"\\s+)",e.UNDERSCORE_IDENT_RE,/\s*(?=\()/],className:{2:"title.function"},keywords:i,contains:[{className:"params",begin:/\(/,end:/\)/,keywords:i,relevance:0,contains:[r,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,er,e.C_BLOCK_COMMENT_MODE,]},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,]},er,r,]}},grmr_javascript:em,grmr_json(e){let n=["true","false","null"],t={scope:"literal",beginKeywords:n.join(" ")};return{name:"JSON",keywords:{literal:n},contains:[{className:"attr",begin:/"(\\.|[^\\"\r\n])*"(?=\s*:)/,relevance:1.01},{match:/[{}[\],:]/,className:"punctuation",relevance:0},e.QUOTE_STRING_MODE,t,e.C_NUMBER_MODE,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,],illegal:"\\S"}},grmr_kotlin(e){let n={keyword:"abstract as val var vararg get set class object open private protected public noinline crossinline dynamic final enum if else do while for when throw try catch finally import package is in fun override companion reified inline lateinit init interface annotation data sealed internal infix operator out by constructor super tailrec where 
const inner suspend typealias external expect actual",built_in:"Byte Short Char Int Long Boolean Float Double Void Unit Nothing",literal:"true false null"},t={className:"symbol",begin:e.UNDERSCORE_IDENT_RE+"@"},a={className:"subst",begin:/\$\{/,end:/\}/,contains:[e.C_NUMBER_MODE]},i={className:"variable",begin:"\\$"+e.UNDERSCORE_IDENT_RE},r={className:"string",variants:[{begin:'"""',end:'"""(?=[^"])',contains:[i,a]},{begin:"'",end:"'",illegal:/\n/,contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"',illegal:/\n/,contains:[e.BACKSLASH_ESCAPE,i,a]},]};a.contains.push(r);let s={className:"meta",begin:"@(?:file|property|field|get|set|receiver|param|setparam|delegate)\\s*:(?:\\s*"+e.UNDERSCORE_IDENT_RE+")?"},l={className:"meta",begin:"@"+e.UNDERSCORE_IDENT_RE,contains:[{begin:/\(/,end:/\)/,contains:[e.inherit(r,{className:"string"}),"self"]},]},o=e.COMMENT("/\\*","\\*/",{contains:[e.C_BLOCK_COMMENT_MODE]}),c={variants:[{className:"type",begin:e.UNDERSCORE_IDENT_RE},{begin:/\(/,end:/\)/,contains:[]},]},d=c;return d.variants[1].contains=[c],c.variants[1].contains=[d],{name:"Kotlin",aliases:["kt","kts"],keywords:n,contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{className:"doctag",begin:"@[A-Za-z]+"}]}),e.C_LINE_COMMENT_MODE,o,{className:"keyword",begin:/\b(break|continue|return|this)\b/,starts:{contains:[{className:"symbol",begin:/@\w+/}]}},t,s,l,{className:"function",beginKeywords:"fun",end:"[(]|$",returnBegin:!0,excludeEnd:!0,keywords:n,relevance:5,contains:[{begin:e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,relevance:0,contains:[e.UNDERSCORE_TITLE_MODE]},{className:"type",begin://,keywords:"reified",relevance:0},{className:"params",begin:/\(/,end:/\)/,endsParent:!0,keywords:n,relevance:0,contains:[{begin:/:/,end:/[=,\/]/,endsWithParent:!0,contains:[c,e.C_LINE_COMMENT_MODE,o],relevance:0},e.C_LINE_COMMENT_MODE,o,s,l,r,e.C_NUMBER_MODE,]},o,]},{begin:[/class|interface|trait/,/\s+/,e.UNDERSCORE_IDENT_RE],beginScope:{3:"title.class"},keywords:"class interface trait",end:/[:\{(]|$/,excludeEnd:!0,illegal:"extends implements",contains:[{beginKeywords:"public protected internal private constructor"},e.UNDERSCORE_TITLE_MODE,{className:"type",begin://,excludeBegin:!0,excludeEnd:!0,relevance:0},{className:"type",begin:/[,:]\s*/,end:/[<\(,){\s]|$/,excludeBegin:!0,returnEnd:!0},s,l,]},r,{className:"meta",begin:"^#!/usr/bin/env",end:"$",illegal:"\n"},er,]}},grmr_less(e){let n=X(e),t="([\\w-]+|@\\{[\\w-]+\\})",a=[],i=[],r=e=>({className:"string",begin:"~?"+e+".*?"+e}),s=(e,n,t)=>({className:e,begin:n,relevance:t}),l={$pattern:/[a-z-]+/,keyword:"and or not only",attribute:J.join(" ")};i.push(e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,r("'"),r('"'),n.CSS_NUMBER_MODE,{begin:"(url|data-uri)\\(",starts:{className:"string",end:"[\\)\\n]",excludeEnd:!0}},n.HEXCOLOR,{begin:"\\(",end:"\\)",contains:i,keywords:l,relevance:0},s("variable","@@?[\\w-]+",10),s("variable","@\\{[\\w-]+\\}"),s("built_in","~?`[^`]*?`"),{className:"attribute",begin:"[\\w-]+\\s*:",end:":",returnBegin:!0,excludeEnd:!0},n.IMPORTANT,{beginKeywords:"and not"},n.FUNCTION_DISPATCH);let o=i.concat({begin:/\{/,end:/\}/,contains:a}),c={beginKeywords:"when",endsWithParent:!0,contains:[{beginKeywords:"and 
not"}].concat(i)},d={begin:t+"\\s*:",returnBegin:!0,end:/[;}]/,relevance:0,contains:[{begin:/-(webkit|moz|ms|o)-/},n.CSS_VARIABLE,{className:"attribute",begin:"\\b("+en.join("|")+")\\b",end:/(?=:)/,starts:{endsWithParent:!0,illegal:"[<=$]",relevance:0,contains:i}},]},g={variants:[{begin:"[\\.#:&\\[>]",end:"[;{}]"},{begin:t,end:/\{/},],returnBegin:!0,returnEnd:!0,illegal:"[<='$\"]",relevance:0,contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,c,s("keyword","all\\b"),s("variable","@\\{[\\w-]+\\}"),{begin:"\\b("+V.join("|")+")\\b",className:"selector-tag"},n.CSS_NUMBER_MODE,s("selector-tag",t,0),s("selector-id","#"+t),s("selector-class","\\."+t,0),s("selector-tag","&",0),n.ATTRIBUTE_SELECTOR_MODE,{className:"selector-pseudo",begin:":("+Y.join("|")+")"},{className:"selector-pseudo",begin:":(:)?("+ee.join("|")+")"},{begin:/\(/,end:/\)/,relevance:0,contains:o},{begin:"!important"},n.FUNCTION_DISPATCH,]},u={begin:`[\\w-]+:(:)?(${et.join("|")})`,returnBegin:!0,contains:[g]};return a.push(e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"keyword",begin:"@(import|media|charset|font-face|(-[a-z]+-)?keyframes|supports|document|namespace|page|viewport|host)\\b",starts:{end:"[;{}]",keywords:l,returnEnd:!0,contains:i,relevance:0}},{className:"variable",variants:[{begin:"@[\\w-]+\\s*:",relevance:15},{begin:"@[\\w-]+"},],starts:{end:"[;}]",returnEnd:!0,contains:o}},u,d,g,c,n.FUNCTION_DISPATCH),{name:"Less",case_insensitive:!0,illegal:"[=>'/<($\"]",contains:a}},grmr_lua(e){let n="\\[=*\\[",t="\\]=*\\]",a={begin:n,end:t,contains:["self"]},i=[e.COMMENT("--(?!\\[=*\\[)","$"),e.COMMENT("--\\[=*\\[",t,{contains:[a],relevance:10}),];return{name:"Lua",keywords:{$pattern:e.UNDERSCORE_IDENT_RE,literal:"true false nil",keyword:"and break do else elseif end for goto if in local not or repeat return then until while",built_in:"_G _ENV _VERSION __index __newindex __mode __call __metatable __tostring __len __gc __add __sub __mul __div __mod __pow __concat __unm __eq __lt __le assert collectgarbage dofile error getfenv getmetatable ipairs load loadfile loadstring module next pairs pcall print rawequal rawget rawset require select setfenv setmetatable tonumber tostring type unpack xpcall arg self coroutine resume yield status wrap create running debug getupvalue debug sethook getmetatable gethook setmetatable setlocal traceback setfenv getinfo setupvalue getlocal getregistry getfenv io lines write close flush open output type read stderr stdin input stdout popen tmpfile math log max acos huge ldexp pi cos tanh pow deg tan cosh sinh random randomseed frexp ceil floor rad abs sqrt modf asin min mod fmod log10 atan2 exp sin atan os exit setlocale date getenv difftime remove time clock tmpname rename execute package preload loadlib loaded loaders cpath config path seeall string sub upper len gfind rep find match char dump gmatch reverse byte format gsub lower table setn insert getn foreachi maxn foreach concat sort remove"},contains:i.concat([{className:"function",beginKeywords:"function",end:"\\)",contains:[e.inherit(e.TITLE_MODE,{begin:"([_a-zA-Z]\\w*\\.)*([_a-zA-Z]\\w*:)?[_a-zA-Z]\\w*"}),{className:"params",begin:"\\(",endsWithParent:!0,contains:i},].concat(i)},e.C_NUMBER_MODE,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{className:"string",begin:n,end:t,contains:[a],relevance:5},])}},grmr_makefile(e){let n={className:"variable",variants:[{begin:"\\$\\("+e.UNDERSCORE_IDENT_RE+"\\)",contains:[e.BACKSLASH_ESCAPE]},{begin:/\$[@%`]+/},]},]},]};return{name:"HTML, 
XML",aliases:["html","xhtml","rss","atom","xjb","xsd","xsl","plist","wsf","svg",],case_insensitive:!0,unicodeRegex:!0,contains:[{className:"meta",begin://,relevance:10,contains:[i,l,s,r,{begin:/\[/,end:/\]/,contains:[{className:"meta",begin://,contains:[i,r,l,s]},]},]},e.COMMENT(//,{relevance:10}),{begin://,relevance:10},a,{className:"meta",end:/\?>/,variants:[{begin:/<\?xml/,relevance:10,contains:[l]},{begin:/<\?[a-z][a-z0-9]+/},]},{className:"tag",begin:/)/,end:/>/,keywords:{name:"style"},contains:[o],starts:{end:/<\/style>/,returnEnd:!0,subLanguage:["css","xml"]}},{className:"tag",begin:/)/,end:/>/,keywords:{name:"script"},contains:[o],starts:{end:/<\/script>/,returnEnd:!0,subLanguage:["javascript","handlebars","xml"]}},{className:"tag",begin:/<>|<\/>/},{className:"tag",begin:n.concat(//,/>/,/\s/)))),end:/\/?>/,contains:[{className:"name",begin:t,relevance:0,starts:o},]},{className:"tag",begin:n.concat(/<\//,n.lookahead(n.concat(t,/>/))),contains:[{className:"name",begin:t,relevance:0},{begin:/>/,relevance:0,endsParent:!0},]},]}},grmr_markdown(e){let n={begin:/<\/?[A-Za-z_]/,end:">",subLanguage:"xml",relevance:0},t={variants:[{begin:/\[.+?\]\[.*?\]/,relevance:0},{begin:/\[.+?\]\(((data|javascript|mailto):|(?:http|ftp)s?:\/\/).*?\)/,relevance:2},{begin:e.regex.concat(/\[.+?\]\(/,/[A-Za-z][A-Za-z0-9+.-]*/,/:\/\/.*?\)/),relevance:2},{begin:/\[.+?\]\([./?&#].*?\)/,relevance:1},{begin:/\[.*?\]\(.*?\)/,relevance:0},],returnBegin:!0,contains:[{match:/\[(?=\])/},{className:"string",relevance:0,begin:"\\[",end:"\\]",excludeBegin:!0,returnEnd:!0},{className:"link",relevance:0,begin:"\\]\\(",end:"\\)",excludeBegin:!0,excludeEnd:!0},{className:"symbol",relevance:0,begin:"\\]\\[",end:"\\]",excludeBegin:!0,excludeEnd:!0},]},a={className:"strong",contains:[],variants:[{begin:/_{2}(?!\s)/,end:/_{2}/},{begin:/\*{2}(?!\s)/,end:/\*{2}/},]},i={className:"emphasis",contains:[],variants:[{begin:/\*(?![*\s])/,end:/\*/},{begin:/_(?![_\s])/,end:/_/,relevance:0},]},r=e.inherit(a,{contains:[]}),s=e.inherit(i,{contains:[]});a.contains.push(s),i.contains.push(r);let l=[n,t];return[a,i,r,s].forEach(e=>{e.contains=e.contains.concat(l)}),{name:"Markdown",aliases:["md","mkdown","mkd"],contains:[{className:"section",variants:[{begin:"^#{1,6}",end:"$",contains:l=l.concat(a,i)},{begin:"(?=^.+?\\n[=-]{2,}$)",contains:[{begin:"^[=-]*$"},{begin:"^",end:"\\n",contains:l},]},]},n,{className:"bullet",begin:"^[ ]*([*+-]|(\\d+\\.))(?=\\s+)",end:"\\s+",excludeEnd:!0},a,i,{className:"quote",begin:"^>\\s+",contains:l,end:"$"},{className:"code",variants:[{begin:"(`{3,})[^`](.|\\n)*?\\1`*[ ]*"},{begin:"(~{3,})[^~](.|\\n)*?\\1~*[ ]*"},{begin:"```",end:"```+[ ]*$"},{begin:"~~~",end:"~~~+[ ]*$"},{begin:"`.+?`"},{begin:"(?=^( {4}|\\t))",contains:[{begin:"^( {4}|\\t)",end:"(\\n)$"}],relevance:0},]},{begin:"^[-\\*]{3,}",end:"$"},t,{begin:/^\[[^\n]+\]:/,returnBegin:!0,contains:[{className:"symbol",begin:/\[/,end:/\]/,excludeBegin:!0,excludeEnd:!0},{className:"link",begin:/:\s*/,end:/$/,excludeBegin:!0},]},]}},grmr_objectivec(e){let 
n=/[a-zA-Z@][a-zA-Z0-9_]*/,t={$pattern:n,keyword:["@interface","@class","@protocol","@implementation"]};return{name:"Objective-C",aliases:["mm","objc","obj-c","obj-c++","objective-c++"],keywords:{"variable.language":["this","super"],$pattern:n,keyword:["while","export","sizeof","typedef","const","struct","for","union","volatile","static","mutable","if","do","return","goto","enum","else","break","extern","asm","case","default","register","explicit","typename","switch","continue","inline","readonly","assign","readwrite","self","@synchronized","id","typeof","nonatomic","IBOutlet","IBAction","strong","weak","copy","in","out","inout","bycopy","byref","oneway","__strong","__weak","__block","__autoreleasing","@private","@protected","@public","@try","@property","@end","@throw","@catch","@finally","@autoreleasepool","@synthesize","@dynamic","@selector","@optional","@required","@encode","@package","@import","@defs","@compatibility_alias","__bridge","__bridge_transfer","__bridge_retained","__bridge_retain","__covariant","__contravariant","__kindof","_Nonnull","_Nullable","_Null_unspecified","__FUNCTION__","__PRETTY_FUNCTION__","__attribute__","getter","setter","retain","unsafe_unretained","nonnull","nullable","null_unspecified","null_resettable","class","instancetype","NS_DESIGNATED_INITIALIZER","NS_UNAVAILABLE","NS_REQUIRES_SUPER","NS_RETURNS_INNER_POINTER","NS_INLINE","NS_AVAILABLE","NS_DEPRECATED","NS_ENUM","NS_OPTIONS","NS_SWIFT_UNAVAILABLE","NS_ASSUME_NONNULL_BEGIN","NS_ASSUME_NONNULL_END","NS_REFINED_FOR_SWIFT","NS_SWIFT_NAME","NS_SWIFT_NOTHROW","NS_DURING","NS_HANDLER","NS_ENDHANDLER","NS_VALUERETURN","NS_VOIDRETURN",],literal:["false","true","FALSE","TRUE","nil","YES","NO","NULL",],built_in:["dispatch_once_t","dispatch_queue_t","dispatch_sync","dispatch_async","dispatch_once",],type:["int","float","char","unsigned","signed","short","long","double","wchar_t","unichar","void","bool","BOOL","id|0","_Bool",]},illegal:"/,end:/$/,illegal:"\\n"},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,]},{className:"class",begin:"("+t.keyword.join("|")+")\\b",end:/(\{|$)/,excludeEnd:!0,keywords:t,contains:[e.UNDERSCORE_TITLE_MODE]},{begin:"\\."+e.UNDERSCORE_IDENT_RE,relevance:0},]}},grmr_perl(e){let n=e.regex,t=/[dualxmsipngr]{0,12}/,a={$pattern:/[\w.]+/,keyword:"abs accept alarm and atan2 bind binmode bless break caller chdir chmod chomp chop chown chr chroot close closedir connect continue cos crypt dbmclose dbmopen defined delete die do dump each else elsif endgrent endhostent endnetent endprotoent endpwent endservent eof eval exec exists exit exp fcntl fileno flock for foreach fork format formline getc getgrent getgrgid getgrnam gethostbyaddr gethostbyname gethostent getlogin getnetbyaddr getnetbyname getnetent getpeername getpgrp getpriority getprotobyname getprotobynumber getprotoent getpwent getpwnam getpwuid getservbyname getservbyport getservent getsockname getsockopt given glob gmtime goto grep gt hex if index int ioctl join keys kill last lc lcfirst length link listen local localtime log lstat lt ma map mkdir msgctl msgget msgrcv msgsnd my ne next no not oct open opendir or ord our pack package pipe pop pos print printf prototype push q|0 qq quotemeta qw qx rand read readdir readline readlink readpipe recv redo ref rename require reset return reverse rewinddir rindex rmdir say scalar seek seekdir select semctl semget semop send setgrent sethostent setnetent setpgrp setpriority setprotoent setpwent setservent setsockopt shift shmctl shmget shmread shmwrite shutdown sin sleep socket socketpair sort 
splice split sprintf sqrt srand stat state study sub substr symlink syscall sysopen sysread sysseek system syswrite tell telldir tie tied time times tr truncate uc ucfirst umask undef unless unlink unpack unshift untie until use utime values vec wait waitpid wantarray warn when while write x|0 xor y|0"},i={className:"subst",begin:"[$@]\\{",end:"\\}",keywords:a},r={begin:/->\{/,end:/\}/},s={variants:[{begin:/\$\d/},{begin:n.concat(/[$%@](\^\w\b|#\w+(::\w+)*|\{\w+\}|\w+(::\w*)*)/,"(?![A-Za-z])(?![@$%])")},{begin:/[$%@][^\s\w{]/,relevance:0},]},l=[e.BACKSLASH_ESCAPE,i,s],o=[/!/,/\//,/\|/,/\?/,/'/,/"/,/#/],c=(e,a,i="\\1")=>{let r="\\1"===i?i:n.concat(i,a);return n.concat(n.concat("(?:",e,")"),a,/(?:\\.|[^\\\/])*?/,r,/(?:\\.|[^\\\/])*?/,i,t)},d=(e,a,i)=>n.concat(n.concat("(?:",e,")"),a,/(?:\\.|[^\\\/])*?/,i,t),g=[s,e.HASH_COMMENT_MODE,e.COMMENT(/^=\w/,/=cut/,{endsWithParent:!0}),r,{className:"string",contains:l,variants:[{begin:"q[qwxr]?\\s*\\(",end:"\\)",relevance:5},{begin:"q[qwxr]?\\s*\\[",end:"\\]",relevance:5},{begin:"q[qwxr]?\\s*\\{",end:"\\}",relevance:5},{begin:"q[qwxr]?\\s*\\|",end:"\\|",relevance:5},{begin:"q[qwxr]?\\s*<",end:">",relevance:5},{begin:"qw\\s+q",end:"q",relevance:5},{begin:"'",end:"'",contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"'},{begin:"`",end:"`",contains:[e.BACKSLASH_ESCAPE]},{begin:/\{\w+\}/,relevance:0},{begin:"-?\\w+\\s*=>",relevance:0},]},{className:"number",begin:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",relevance:0},{begin:"(\\/\\/|"+e.RE_STARTERS_RE+"|\\b(split|return|print|reverse|grep)\\b)\\s*",keywords:"split return print reverse grep",relevance:0,contains:[e.HASH_COMMENT_MODE,{className:"regexp",variants:[{begin:c("s|tr|y",n.either(...o,{capture:!0}))},{begin:c("s|tr|y","\\(","\\)")},{begin:c("s|tr|y","\\[","\\]")},{begin:c("s|tr|y","\\{","\\}")},],relevance:2},{className:"regexp",variants:[{begin:/(m|qr)\/\//,relevance:0},{begin:d("(?:m|qr)?",/\//,/\//)},{begin:d("m|qr",n.either(...o,{capture:!0}),/\1/)},{begin:d("m|qr",/\(/,/\)/)},{begin:d("m|qr",/\[/,/\]/)},{begin:d("m|qr",/\{/,/\}/)},]},]},{className:"function",beginKeywords:"sub",end:"(\\s*\\(.*?\\))?[;{]",excludeEnd:!0,relevance:5,contains:[e.TITLE_MODE]},{begin:"-\\w\\b",relevance:0},{begin:"^__DATA__$",end:"^__END__$",subLanguage:"mojolicious",contains:[{begin:"^@@.*",end:"$",className:"comment"}]},];return i.contains=g,r.contains=g,{name:"Perl",aliases:["pl","pm"],keywords:a,contains:g}},grmr_php(e){let n=e.regex,t=/(?![A-Za-z0-9])(?![$])/,a=n.concat(/[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/,t),i=n.concat(/(\\?[A-Z][a-z0-9_\x7f-\xff]+|\\?[A-Z]+(?=[A-Z][a-z0-9_\x7f-\xff])){1,}/,t),r={scope:"variable",match:"\\$+"+a},s={scope:"subst",variants:[{begin:/\$\w+/},{begin:/\{\$/,end:/\}/},]},l=e.inherit(e.APOS_STRING_MODE,{illegal:null}),o="[ \n]",c={scope:"string",variants:[e.inherit(e.QUOTE_STRING_MODE,{illegal:null,contains:e.QUOTE_STRING_MODE.contains.concat(s)}),l,e.END_SAME_AS_BEGIN({begin:/<<<[ \t]*(\w+)\n/,end:/[ 
\t]*(\w+)\b/,contains:e.QUOTE_STRING_MODE.contains.concat(s)}),]},d={scope:"number",variants:[{begin:"\\b0[bB][01]+(?:_[01]+)*\\b"},{begin:"\\b0[oO][0-7]+(?:_[0-7]+)*\\b"},{begin:"\\b0[xX][\\da-fA-F]+(?:_[\\da-fA-F]+)*\\b"},{begin:"(?:\\b\\d+(?:_\\d+)*(\\.(?:\\d+(?:_\\d+)*))?|\\B\\.\\d+)(?:[eE][+-]?\\d+)?"},],relevance:0},g=["false","null","true"],u=["__CLASS__","__DIR__","__FILE__","__FUNCTION__","__COMPILER_HALT_OFFSET__","__LINE__","__METHOD__","__NAMESPACE__","__TRAIT__","die","echo","exit","include","include_once","print","require","require_once","array","abstract","and","as","binary","bool","boolean","break","callable","case","catch","class","clone","const","continue","declare","default","do","double","else","elseif","empty","enddeclare","endfor","endforeach","endif","endswitch","endwhile","enum","eval","extends","final","finally","float","for","foreach","from","global","goto","if","implements","instanceof","insteadof","int","integer","interface","isset","iterable","list","match|0","mixed","new","never","object","or","private","protected","public","readonly","real","return","string","switch","throw","trait","try","unset","use","var","void","while","xor","yield",],b=["Error|0","AppendIterator","ArgumentCountError","ArithmeticError","ArrayIterator","ArrayObject","AssertionError","BadFunctionCallException","BadMethodCallException","CachingIterator","CallbackFilterIterator","CompileError","Countable","DirectoryIterator","DivisionByZeroError","DomainException","EmptyIterator","ErrorException","Exception","FilesystemIterator","FilterIterator","GlobIterator","InfiniteIterator","InvalidArgumentException","IteratorIterator","LengthException","LimitIterator","LogicException","MultipleIterator","NoRewindIterator","OutOfBoundsException","OutOfRangeException","OuterIterator","OverflowException","ParentIterator","ParseError","RangeException","RecursiveArrayIterator","RecursiveCachingIterator","RecursiveCallbackFilterIterator","RecursiveDirectoryIterator","RecursiveFilterIterator","RecursiveIterator","RecursiveIteratorIterator","RecursiveRegexIterator","RecursiveTreeIterator","RegexIterator","RuntimeException","SeekableIterator","SplDoublyLinkedList","SplFileInfo","SplFileObject","SplFixedArray","SplHeap","SplMaxHeap","SplMinHeap","SplObjectStorage","SplObserver","SplPriorityQueue","SplQueue","SplStack","SplSubject","SplTempFileObject","TypeError","UnderflowException","UnexpectedValueException","UnhandledMatchError","ArrayAccess","BackedEnum","Closure","Fiber","Generator","Iterator","IteratorAggregate","Serializable","Stringable","Throwable","Traversable","UnitEnum","WeakReference","WeakMap","Directory","__PHP_Incomplete_Class","parent","php_user_filter","self","static","stdClass",],m={keyword:u,literal:(e=>{let n=[];return 
e.forEach(e=>{n.push(e),e.toLowerCase()===e?n.push(e.toUpperCase()):n.push(e.toLowerCase())}),n})(g),built_in:b},p=e=>e.map(e=>e.replace(/\|\d+$/,"")),h={variants:[{match:[/new/,n.concat(o,"+"),n.concat("(?!",p(b).join("\\b|"),"\\b)"),i,],scope:{1:"keyword",4:"title.class"}},]},f=n.concat(a,"\\b(?!\\()"),E={variants:[{match:[n.concat(/::/,n.lookahead(/(?!class\b)/)),f],scope:{2:"variable.constant"}},{match:[/::/,/class/],scope:{2:"variable.language"}},{match:[i,n.concat(/::/,n.lookahead(/(?!class\b)/)),f],scope:{1:"title.class",3:"variable.constant"}},{match:[i,n.concat("::",n.lookahead(/(?!class\b)/))],scope:{1:"title.class"}},{match:[i,/::/,/class/],scope:{1:"title.class",3:"variable.language"}},]},$={scope:"attr",match:n.concat(a,n.lookahead(":"),n.lookahead(/(?!::)/))},y={relevance:0,begin:/\(/,end:/\)/,keywords:m,contains:[$,r,E,e.C_BLOCK_COMMENT_MODE,c,d,h]},N={relevance:0,match:[/\b/,n.concat("(?!fn\\b|function\\b|",p(u).join("\\b|"),"|",p(b).join("\\b|"),"\\b)"),a,n.concat(o,"*"),n.lookahead(/(?=\()/),],scope:{3:"title.function.invoke"},contains:[y]};y.contains.push(N);let w=[$,E,e.C_BLOCK_COMMENT_MODE,c,d,h];return{case_insensitive:!1,keywords:m,contains:[{begin:n.concat(/#\[\s*/,i),beginScope:"meta",end:/]/,endScope:"meta",keywords:{literal:g,keyword:["new","array"]},contains:[{begin:/\[/,end:/]/,keywords:{literal:g,keyword:["new","array"]},contains:["self",...w]},...w,{scope:"meta",match:i},]},e.HASH_COMMENT_MODE,e.COMMENT("//","$"),e.COMMENT("/\\*","\\*/",{contains:[{scope:"doctag",match:"@[A-Za-z]+"},]}),{match:/__halt_compiler\(\);/,keywords:"__halt_compiler",starts:{scope:"comment",end:e.MATCH_NOTHING_RE,contains:[{match:/\?>/,scope:"meta",endsParent:!0}]}},{scope:"meta",variants:[{begin:/<\?php/,relevance:10},{begin:/<\?=/},{begin:/<\?/,relevance:.1},{begin:/\?>/},]},{scope:"variable.language",match:/\$this\b/},r,N,E,{match:[/const/,/\s/,a],scope:{1:"keyword",3:"variable.constant"}},h,{scope:"function",relevance:0,beginKeywords:"fn function",end:/[;{]/,excludeEnd:!0,illegal:"[$%\\[]",contains:[{beginKeywords:"use"},e.UNDERSCORE_TITLE_MODE,{begin:"=>",endsParent:!0},{scope:"params",begin:"\\(",end:"\\)",excludeBegin:!0,excludeEnd:!0,keywords:m,contains:["self",r,E,e.C_BLOCK_COMMENT_MODE,c,d]},]},{scope:"class",variants:[{beginKeywords:"enum",illegal:/[($"]/},{beginKeywords:"class interface trait",illegal:/[:($"]/},],relevance:0,end:/\{/,excludeEnd:!0,contains:[{beginKeywords:"extends implements"},e.UNDERSCORE_TITLE_MODE,]},{beginKeywords:"namespace",relevance:0,end:";",illegal:/[.']/,contains:[e.inherit(e.UNDERSCORE_TITLE_MODE,{scope:"title.class"}),]},{beginKeywords:"use",relevance:0,end:";",contains:[{match:/\b(as|const|function)\b/,scope:"keyword"},e.UNDERSCORE_TITLE_MODE,]},c,d,]}},grmr_php_template:e=>({name:"PHP template",subLanguage:"xml",contains:[{begin:/<\?(php|=)?/,end:/\?>/,subLanguage:"php",contains:[{begin:"/\\*",end:"\\*/",skip:!0},{begin:'b"',end:'"',skip:!0},{begin:"b'",end:"'",skip:!0},e.inherit(e.APOS_STRING_MODE,{illegal:null,className:null,contains:null,skip:!0}),e.inherit(e.QUOTE_STRING_MODE,{illegal:null,className:null,contains:null,skip:!0}),]},]}),grmr_plaintext:e=>({name:"Plain text",aliases:["text","txt"],disableAutodetect:!0}),grmr_python(e){let 
n=e.regex,t=/[\p{XID_Start}_]\p{XID_Continue}*/u,a=["and","as","assert","async","await","break","case","class","continue","def","del","elif","else","except","finally","for","from","global","if","import","in","is","lambda","match","nonlocal|10","not","or","pass","raise","return","try","while","with","yield",],i={$pattern:/[A-Za-z]\w+|__\w+__/,keyword:a,built_in:["__import__","abs","all","any","ascii","bin","bool","breakpoint","bytearray","bytes","callable","chr","classmethod","compile","complex","delattr","dict","dir","divmod","enumerate","eval","exec","filter","float","format","frozenset","getattr","globals","hasattr","hash","help","hex","id","input","int","isinstance","issubclass","iter","len","list","locals","map","max","memoryview","min","next","object","oct","open","ord","pow","print","property","range","repr","reversed","round","set","setattr","slice","sorted","staticmethod","str","sum","super","tuple","type","vars","zip",],literal:["__debug__","Ellipsis","False","None","NotImplemented","True",],type:["Any","Callable","Coroutine","Dict","List","Literal","Generic","Optional","Sequence","Set","Tuple","Type","Union",]},r={className:"meta",begin:/^(>>>|\.\.\.) /},s={className:"subst",begin:/\{/,end:/\}/,keywords:i,illegal:/#/},l={begin:/\{\{/,relevance:0},o={className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{begin:/([uU]|[bB]|[rR]|[bB][rR]|[rR][bB])?'''/,end:/'''/,contains:[e.BACKSLASH_ESCAPE,r],relevance:10},{begin:/([uU]|[bB]|[rR]|[bB][rR]|[rR][bB])?"""/,end:/"""/,contains:[e.BACKSLASH_ESCAPE,r],relevance:10},{begin:/([fF][rR]|[rR][fF]|[fF])'''/,end:/'''/,contains:[e.BACKSLASH_ESCAPE,r,l,s]},{begin:/([fF][rR]|[rR][fF]|[fF])"""/,end:/"""/,contains:[e.BACKSLASH_ESCAPE,r,l,s]},{begin:/([uU]|[rR])'/,end:/'/,relevance:10},{begin:/([uU]|[rR])"/,end:/"/,relevance:10},{begin:/([bB]|[bB][rR]|[rR][bB])'/,end:/'/},{begin:/([bB]|[bB][rR]|[rR][bB])"/,end:/"/},{begin:/([fF][rR]|[rR][fF]|[fF])'/,end:/'/,contains:[e.BACKSLASH_ESCAPE,l,s]},{begin:/([fF][rR]|[rR][fF]|[fF])"/,end:/"/,contains:[e.BACKSLASH_ESCAPE,l,s]},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,]},c="[0-9](_?[0-9])*",d=`(\\b(${c}))?\\.(${c})|\\b(${c})\\.`,g="\\b|"+a.join("|"),u={className:"number",relevance:0,variants:[{begin:`(\\b(${c})|(${d}))[eE][+-]?(${c})[jJ]?(?=${g})`},{begin:`(${d})[jJ]?`},{begin:`\\b([1-9](_?[0-9])*|0+(_?0)*)[lLjJ]?(?=${g})`},{begin:`\\b0[bB](_?[01])+[lL]?(?=${g})`},{begin:`\\b0[oO](_?[0-7])+[lL]?(?=${g})`},{begin:`\\b0[xX](_?[0-9a-fA-F])+[lL]?(?=${g})`},{begin:`\\b(${c})[jJ](?=${g})`},]},b={className:"comment",begin:n.lookahead(/# type:/),end:/$/,keywords:i,contains:[{begin:/# type:/},{begin:/#/,end:/\b\B/,endsWithParent:!0},]},m={className:"params",variants:[{className:"",begin:/\(\s*\)/,skip:!0},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:i,contains:["self",r,u,o,e.HASH_COMMENT_MODE]},]};return s.contains=[o,u,r],{name:"Python",aliases:["py","gyp","ipython"],unicodeRegex:!0,keywords:i,illegal:/(<\/|->|\?)|=>/,contains:[r,u,{begin:/\bself\b/},{beginKeywords:"if",relevance:0},o,b,e.HASH_COMMENT_MODE,{match:[/\bdef/,/\s+/,t],scope:{1:"keyword",3:"title.function"},contains:[m]},{variants:[{match:[/\bclass/,/\s+/,t,/\s*/,/\(\s*/,t,/\s*\)/]},{match:[/\bclass/,/\s+/,t]},],scope:{1:"keyword",3:"title.class",6:"title.class.inherited"}},{className:"meta",begin:/^[\t ]*@/,end:/(?=#)|$/,contains:[u,m,o]},]}},grmr_python_repl:e=>({aliases:["pycon"],contains:[{className:"meta.prompt",starts:{end:/ |$/,starts:{end:"$",subLanguage:"python"}},variants:[{begin:/^>>>(?=[ ]|$)/},{begin:/^\.\.\.(?=[ 
]|$)/},]},]}),grmr_r(e){let n=e.regex,t=/(?:(?:[a-zA-Z]|\.[._a-zA-Z])[._a-zA-Z0-9]*)|\.(?!\d)/,a=n.either(/0[xX][0-9a-fA-F]+\.[0-9a-fA-F]*[pP][+-]?\d+i?/,/0[xX][0-9a-fA-F]+(?:[pP][+-]?\d+)?[Li]?/,/(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?[Li]?/),i=/[=!<>:]=|\|\||&&|:::?|<-|<<-|->>|->|\|>|[-+*\/?!$&|:<=>@^~]|\*\*/,r=n.either(/[()]/,/[{}]/,/\[\[/,/[[\]]/,/\\/,/,/);return{name:"R",keywords:{$pattern:t,keyword:"function if in break next repeat else for while",literal:"NULL NA TRUE FALSE Inf NaN NA_integer_|10 NA_real_|10 NA_character_|10 NA_complex_|10",built_in:"LETTERS letters month.abb month.name pi T F abs acos acosh all any anyNA Arg as.call as.character as.complex as.double as.environment as.integer as.logical as.null.default as.numeric as.raw asin asinh atan atanh attr attributes baseenv browser c call ceiling class Conj cos cosh cospi cummax cummin cumprod cumsum digamma dim dimnames emptyenv exp expression floor forceAndCall gamma gc.time globalenv Im interactive invisible is.array is.atomic is.call is.character is.complex is.double is.environment is.expression is.finite is.function is.infinite is.integer is.language is.list is.logical is.matrix is.na is.name is.nan is.null is.numeric is.object is.pairlist is.raw is.recursive is.single is.symbol lazyLoadDBfetch length lgamma list log max min missing Mod names nargs nzchar oldClass on.exit pos.to.env proc.time prod quote range Re rep retracemem return round seq_along seq_len seq.int sign signif sin sinh sinpi sqrt standardGeneric substitute sum switch tan tanh tanpi tracemem trigamma trunc unclass untracemem UseMethod xtfrm"},contains:[e.COMMENT(/#'/,/$/,{contains:[{scope:"doctag",match:/@examples/,starts:{end:n.lookahead(n.either(/\n^#'\s*(?=@[a-zA-Z]+)/,/\n^(?!#')/)),endsParent:!0}},{scope:"doctag",begin:"@param",end:/$/,contains:[{scope:"variable",variants:[{match:t},{match:/`(?:\\.|[^`\\])+`/}],endsParent:!0},]},{scope:"doctag",match:/@[a-zA-Z]+/},{scope:"keyword",match:/\\[a-zA-Z]+/},]}),e.HASH_COMMENT_MODE,{scope:"string",contains:[e.BACKSLASH_ESCAPE],variants:[e.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\(/,end:/\)(-*)"/}),e.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\{/,end:/\}(-*)"/}),e.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\[/,end:/\](-*)"/}),e.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\(/,end:/\)(-*)'/}),e.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\{/,end:/\}(-*)'/}),e.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\[/,end:/\](-*)'/}),{begin:'"',end:'"',relevance:0},{begin:"'",end:"'",relevance:0},]},{relevance:0,variants:[{scope:{1:"operator",2:"number"},match:[i,a]},{scope:{1:"operator",2:"number"},match:[/%[^%]*%/,a]},{scope:{1:"punctuation",2:"number"},match:[r,a]},{scope:{2:"number"},match:[/[^a-zA-Z0-9._]|^/,a]},]},{scope:{3:"operator"},match:[t,/\s+/,/<-/,/\s+/]},{scope:"operator",relevance:0,variants:[{match:i},{match:/%[^%]*%/},]},{scope:"punctuation",relevance:0,match:r},{begin:"`",end:"`",contains:[{begin:/\\./}]},]}},grmr_ruby(e){let 
n=e.regex,t="([a-zA-Z_]\\w*[!?=]?|[-+~]@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?)",a=n.either(/\b([A-Z]+[a-z0-9]+)+/,/\b([A-Z]+[a-z0-9]+)+[A-Z]+/),i=n.concat(a,/(::\w+)*/),r={"variable.constant":["__FILE__","__LINE__","__ENCODING__"],"variable.language":["self","super"],keyword:["alias","and","begin","BEGIN","break","case","class","defined","do","else","elsif","end","END","ensure","for","if","in","module","next","not","or","redo","require","rescue","retry","return","then","undef","unless","until","when","while","yield","include","extend","prepend","public","private","protected","raise","throw",],built_in:["proc","lambda","attr_accessor","attr_reader","attr_writer","define_method","private_constant","module_function",],literal:["true","false","nil"]},s={className:"doctag",begin:"@[A-Za-z]+"},l={begin:"#<",end:">"},o=[e.COMMENT("#","$",{contains:[s]}),e.COMMENT("^=begin","^=end",{contains:[s],relevance:10}),e.COMMENT("^__END__",e.MATCH_NOTHING_RE),],c={className:"subst",begin:/#\{/,end:/\}/,keywords:r},d={className:"string",contains:[e.BACKSLASH_ESCAPE,c],variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/`/,end:/`/},{begin:/%[qQwWx]?\(/,end:/\)/},{begin:/%[qQwWx]?\[/,end:/\]/},{begin:/%[qQwWx]?\{/,end:/\}/},{begin:/%[qQwWx]?/},{begin:/%[qQwWx]?\//,end:/\//},{begin:/%[qQwWx]?%/,end:/%/},{begin:/%[qQwWx]?-/,end:/-/},{begin:/%[qQwWx]?\|/,end:/\|/},{begin:/\B\?(\\\d{1,3})/},{begin:/\B\?(\\x[A-Fa-f0-9]{1,2})/},{begin:/\B\?(\\u\{?[A-Fa-f0-9]{1,6}\}?)/},{begin:/\B\?(\\M-\\C-|\\M-\\c|\\c\\M-|\\M-|\\C-\\M-)[\x20-\x7e]/},{begin:/\B\?\\(c|C-)[\x20-\x7e]/},{begin:/\B\?\\?\S/},{begin:n.concat(/<<[-~]?'?/,n.lookahead(/(\w+)(?=\W)[^\n]*\n(?:[^\n]*\n)*?\s*\1\b/)),contains:[e.END_SAME_AS_BEGIN({begin:/(\w+)/,end:/(\w+)/,contains:[e.BACKSLASH_ESCAPE,c]}),]},]},g="[0-9](_?[0-9])*",u={className:"number",relevance:0,variants:[{begin:`\\b([1-9](_?[0-9])*|0)(\\.(${g}))?([eE][+-]?(${g})|r)?i?\\b`},{begin:"\\b0[dD][0-9](_?[0-9])*r?i?\\b"},{begin:"\\b0[bB][0-1](_?[0-1])*r?i?\\b"},{begin:"\\b0[oO][0-7](_?[0-7])*r?i?\\b"},{begin:"\\b0[xX][0-9a-fA-F](_?[0-9a-fA-F])*r?i?\\b"},{begin:"\\b0(_?[0-7])+r?i?\\b"},]},b={variants:[{match:/\(\)/},{className:"params",begin:/\(/,end:/(?=\))/,excludeBegin:!0,endsParent:!0,keywords:r},]},m=[d,{variants:[{match:[/class\s+/,i,/\s+<\s+/,i]},{match:[/\b(class|module)\s+/,i]},],scope:{2:"title.class",4:"title.class.inherited"},keywords:r},{match:[/(include|extend)\s+/,i],scope:{2:"title.class"},keywords:r},{relevance:0,match:[i,/\.new[. 
(]/],scope:{1:"title.class"}},{relevance:0,match:/\b[A-Z][A-Z_0-9]+\b/,className:"variable.constant"},{relevance:0,match:a,scope:"title.class"},{match:[/def/,/\s+/,t],scope:{1:"keyword",3:"title.function"},contains:[b]},{begin:e.IDENT_RE+"::"},{className:"symbol",begin:e.UNDERSCORE_IDENT_RE+"(!|\\?)?:",relevance:0},{className:"symbol",begin:":(?!\\s)",contains:[d,{begin:t}],relevance:0},u,{className:"variable",begin:"(\\$\\W)|((\\$|@@?)(\\w+))(?=[^@$?])(?![A-Za-z])(?![@$?'])"},{className:"params",begin:/\|/,end:/\|/,excludeBegin:!0,excludeEnd:!0,relevance:0,keywords:r},{begin:"("+e.RE_STARTERS_RE+"|unless)\\s*",keywords:"unless",contains:[{className:"regexp",contains:[e.BACKSLASH_ESCAPE,c],illegal:/\n/,variants:[{begin:"/",end:"/[a-z]*"},{begin:/%r\{/,end:/\}[a-z]*/},{begin:"%r\\(",end:"\\)[a-z]*"},{begin:"%r!",end:"![a-z]*"},{begin:"%r\\[",end:"\\][a-z]*"},]},].concat(l,o),relevance:0},].concat(l,o);return c.contains=m,b.contains=m,o.unshift(l),{name:"Ruby",aliases:["rb","gemspec","podspec","thor","irb"],keywords:r,illegal:/\/\*/,contains:[e.SHEBANG({binary:"ruby"})].concat([{begin:/^\s*=>/,starts:{end:"$",contains:m}},{className:"meta.prompt",begin:"^([>?]>|[\\w#]+\\(\\w+\\):\\d+:\\d+[>*]|(\\w+-)?\\d+\\.\\d+\\.\\d+(p\\d+)?[^\\d][^>]+>)(?=[ ])",starts:{end:"$",keywords:r,contains:m}},]).concat(o).concat(m)}},grmr_rust(e){let n=e.regex,t={className:"title.function.invoke",relevance:0,begin:n.concat(/\b/,/(?!let\b)/,e.IDENT_RE,n.lookahead(/\s*\(/))},a="([ui](8|16|32|64|128|size)|f(32|64))?",i=["drop ","Copy","Send","Sized","Sync","Drop","Fn","FnMut","FnOnce","ToOwned","Clone","Debug","PartialEq","PartialOrd","Eq","Ord","AsRef","AsMut","Into","From","Default","Iterator","Extend","IntoIterator","DoubleEndedIterator","ExactSizeIterator","SliceConcatExt","ToString","assert!","assert_eq!","bitflags!","bytes!","cfg!","col!","concat!","concat_idents!","debug_assert!","debug_assert_eq!","env!","panic!","file!","format!","format_args!","include_bytes!","include_str!","line!","local_data_key!","module_path!","option_env!","print!","println!","select!","stringify!","try!","unimplemented!","unreachable!","vec!","write!","writeln!","macro_rules!","assert_ne!","debug_assert_ne!",],r=["i8","i16","i32","i64","i128","isize","u8","u16","u32","u64","u128","usize","f32","f64","str","char","bool","Box","Option","Result","String","Vec",];return{name:"Rust",aliases:["rs"],keywords:{$pattern:e.IDENT_RE+"!?",type:r,keyword:["abstract","as","async","await","become","box","break","const","continue","crate","do","dyn","else","enum","extern","false","final","fn","for","if","impl","in","let","loop","macro","match","mod","move","mut","override","priv","pub","ref","return","self","Self","static","struct","super","trait","true","try","type","typeof","unsafe","unsized","use","virtual","where","while","yield",],literal:["true","false","Some","None","Ok","Err"],built_in:i},illegal:""},t,]}},grmr_scss(e){let 
n=X(e),t="@[a-z-]+",a={className:"variable",begin:"(\\$[a-zA-Z-][a-zA-Z0-9_-]*)\\b",relevance:0};return{name:"SCSS",case_insensitive:!0,illegal:"[=/|']",contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,n.CSS_NUMBER_MODE,{className:"selector-id",begin:"#[A-Za-z0-9_-]+",relevance:0},{className:"selector-class",begin:"\\.[A-Za-z0-9_-]+",relevance:0},n.ATTRIBUTE_SELECTOR_MODE,{className:"selector-tag",begin:"\\b("+V.join("|")+")\\b",relevance:0},{className:"selector-pseudo",begin:":("+Y.join("|")+")"},{className:"selector-pseudo",begin:":(:)?("+ee.join("|")+")"},a,{begin:/\(/,end:/\)/,contains:[n.CSS_NUMBER_MODE]},n.CSS_VARIABLE,{className:"attribute",begin:"\\b("+en.join("|")+")\\b"},{begin:"\\b(whitespace|wait|w-resize|visible|vertical-text|vertical-ideographic|uppercase|upper-roman|upper-alpha|underline|transparent|top|thin|thick|text|text-top|text-bottom|tb-rl|table-header-group|table-footer-group|sw-resize|super|strict|static|square|solid|small-caps|separate|se-resize|scroll|s-resize|rtl|row-resize|ridge|right|repeat|repeat-y|repeat-x|relative|progress|pointer|overline|outside|outset|oblique|nowrap|not-allowed|normal|none|nw-resize|no-repeat|no-drop|newspaper|ne-resize|n-resize|move|middle|medium|ltr|lr-tb|lowercase|lower-roman|lower-alpha|loose|list-item|line|line-through|line-edge|lighter|left|keep-all|justify|italic|inter-word|inter-ideograph|inside|inset|inline|inline-block|inherit|inactive|ideograph-space|ideograph-parenthesis|ideograph-numeric|ideograph-alpha|horizontal|hidden|help|hand|groove|fixed|ellipsis|e-resize|double|dotted|distribute|distribute-space|distribute-letter|distribute-all-lines|disc|disabled|default|decimal|dashed|crosshair|collapse|col-resize|circle|char|center|capitalize|break-word|break-all|bottom|both|bolder|bold|block|bidi-override|below|baseline|auto|always|all-scroll|absolute|table|table-cell)\\b"},{begin:/:/,end:/[;}{]/,relevance:0,contains:[n.BLOCK_COMMENT,a,n.HEXCOLOR,n.CSS_NUMBER_MODE,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,n.IMPORTANT,n.FUNCTION_DISPATCH,]},{begin:"@(page|font-face)",keywords:{$pattern:t,keyword:"@page @font-face"}},{begin:"@",end:"[{;]",returnBegin:!0,keywords:{$pattern:/[a-z-]+/,keyword:"and or not only",attribute:J.join(" ")},contains:[{begin:t,className:"keyword"},{begin:/[a-z-]+(?=:)/,className:"attribute"},a,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,n.HEXCOLOR,n.CSS_NUMBER_MODE,]},n.FUNCTION_DISPATCH,]}},grmr_shell:e=>({name:"Shell Session",aliases:["console","shellsession"],contains:[{className:"meta.prompt",begin:/^\s{0,3}[/~\w\d[\]()@-]*[>%$#][ ]?/,starts:{end:/[^\\](?=\s*$)/,subLanguage:"bash"}},]}),grmr_sql(e){let 
n=e.regex,t=e.COMMENT("--","$"),a=["true","false","unknown"],i=["bigint","binary","blob","boolean","char","character","clob","date","dec","decfloat","decimal","float","int","integer","interval","nchar","nclob","national","numeric","real","row","smallint","time","timestamp","varchar","varying","varbinary",],r=["abs","acos","array_agg","asin","atan","avg","cast","ceil","ceiling","coalesce","corr","cos","cosh","count","covar_pop","covar_samp","cume_dist","dense_rank","deref","element","exp","extract","first_value","floor","json_array","json_arrayagg","json_exists","json_object","json_objectagg","json_query","json_table","json_table_primitive","json_value","lag","last_value","lead","listagg","ln","log","log10","lower","max","min","mod","nth_value","ntile","nullif","percent_rank","percentile_cont","percentile_disc","position","position_regex","power","rank","regr_avgx","regr_avgy","regr_count","regr_intercept","regr_r2","regr_slope","regr_sxx","regr_sxy","regr_syy","row_number","sin","sinh","sqrt","stddev_pop","stddev_samp","substring","substring_regex","sum","tan","tanh","translate","translate_regex","treat","trim","trim_array","unnest","upper","value_of","var_pop","var_samp","width_bucket",],s=["create table","insert into","primary key","foreign key","not null","alter table","add constraint","grouping sets","on overflow","character set","respect nulls","ignore nulls","nulls first","nulls last","depth first","breadth first",],l=r,o=["abs","acos","all","allocate","alter","and","any","are","array","array_agg","array_max_cardinality","as","asensitive","asin","asymmetric","at","atan","atomic","authorization","avg","begin","begin_frame","begin_partition","between","bigint","binary","blob","boolean","both","by","call","called","cardinality","cascaded","case","cast","ceil","ceiling","char","char_length","character","character_length","check","classifier","clob","close","coalesce","collate","collect","column","commit","condition","connect","constraint","contains","convert","copy","corr","corresponding","cos","cosh","count","covar_pop","covar_samp","create","cross","cube","cume_dist","current","current_catalog","current_date","current_default_transform_group","current_path","current_role","current_row","current_schema","current_time","current_timestamp","current_path","current_role","current_transform_group_for_type","current_user","cursor","cycle","date","day","deallocate","dec","decimal","decfloat","declare","default","define","delete","dense_rank","deref","describe","deterministic","disconnect","distinct","double","drop","dynamic","each","element","else","empty","end","end_frame","end_partition","end-exec","equals","escape","every","except","exec","execute","exists","exp","external","extract","false","fetch","filter","first_value","float","floor","for","foreign","frame_row","free","from","full","function","fusion","get","global","grant","group","grouping","groups","having","hold","hour","identity","in","indicator","initial","inner","inout","insensitive","insert","int","integer","intersect","intersection","interval","into","is","join","json_array","json_arrayagg","json_exists","json_object","json_objectagg","json_query","json_table","json_table_primitive","json_value","lag","language","large","last_value","lateral","lead","leading","left","like","like_regex","listagg","ln","local","localtime","localtimestamp","log","log10","lower","match","match_number","match_recognize","matches","max","member","merge","method","min","minute","mod","modifies","module","month","multiset","national","natural","nchar","
nclob","new","no","none","normalize","not","nth_value","ntile","null","nullif","numeric","octet_length","occurrences_regex","of","offset","old","omit","on","one","only","open","or","order","out","outer","over","overlaps","overlay","parameter","partition","pattern","per","percent","percent_rank","percentile_cont","percentile_disc","period","portion","position","position_regex","power","precedes","precision","prepare","primary","procedure","ptf","range","rank","reads","real","recursive","ref","references","referencing","regr_avgx","regr_avgy","regr_count","regr_intercept","regr_r2","regr_slope","regr_sxx","regr_sxy","regr_syy","release","result","return","returns","revoke","right","rollback","rollup","row","row_number","rows","running","savepoint","scope","scroll","search","second","seek","select","sensitive","session_user","set","show","similar","sin","sinh","skip","smallint","some","specific","specifictype","sql","sqlexception","sqlstate","sqlwarning","sqrt","start","static","stddev_pop","stddev_samp","submultiset","subset","substring","substring_regex","succeeds","sum","symmetric","system","system_time","system_user","table","tablesample","tan","tanh","then","time","timestamp","timezone_hour","timezone_minute","to","trailing","translate","translate_regex","translation","treat","trigger","trim","trim_array","true","truncate","uescape","union","unique","unknown","unnest","update","upper","user","using","value","values","value_of","var_pop","var_samp","varbinary","varchar","varying","versioning","when","whenever","where","width_bucket","window","with","within","without","year","add","asc","collation","desc","final","first","last","view",].filter(e=>!r.includes(e)),c={begin:n.concat(/\b/,n.either(...l),/\s*\(/),relevance:0,keywords:{built_in:l}};return{name:"SQL",case_insensitive:!0,illegal:/[{}]|<\//,keywords:{$pattern:/\b[\w\.]+/,keyword:((e,{exceptions:n,when:t}={})=>{let a=t;return n=n||[],e.map(e=>e.match(/\|\d+$/)||n.includes(e)?e:a(e)?e+"|0":e)})(o,{when:e=>e.length<3}),literal:a,type:i,built_in:["current_catalog","current_date","current_default_transform_group","current_path","current_role","current_schema","current_transform_group_for_type","current_user","session_user","system_time","system_user","current_time","localtime","current_timestamp","localtimestamp",]},contains:[{begin:n.either(...s),relevance:0,keywords:{$pattern:/[\w\.]+/,keyword:o.concat(s),literal:a,type:i}},{className:"type",begin:n.either("double precision","large object","with timezone","without timezone")},c,{className:"variable",begin:/@[a-z0-9]+/},{className:"string",variants:[{begin:/'/,end:/'/,contains:[{begin:/''/}]},]},{begin:/"/,end:/"/,contains:[{begin:/""/},]},e.C_NUMBER_MODE,e.C_BLOCK_COMMENT_MODE,t,{className:"operator",begin:/[-+*/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?/,relevance:0},]}},grmr_swift(e){let n={match:/\s+/,relevance:0},t=e.COMMENT("/\\*","\\*/",{contains:["self"]}),a=[e.C_LINE_COMMENT_MODE,t],i={match:[/\./,p(...e8,...eh)],className:{2:"keyword"}},r={match:m(/\./,p(...eE)),relevance:0},s=eE.filter(e=>"string"==typeof e).concat(["_|0"]),l={variants:[{className:"keyword",match:p(...eE.filter(e=>"string"!=typeof 
e).concat(ef).map(ep),...eh)},]},o={$pattern:p(/\b\w+/,/#\w+/),keyword:s.concat(eN),literal:e$},c=[i,r,l],d=[{match:m(/\./,p(...ew)),relevance:0},{className:"built_in",match:m(/\b/,p(...ew),/(?=\()/)},],u={match:/->/,relevance:0},b=[u,{className:"operator",relevance:0,variants:[{match:ek},{match:`\\.(\\.|${ex})+`}]},],h="([0-9a-fA-F]_*)+",f={className:"number",relevance:0,variants:[{match:"\\b(([0-9]_*)+)(\\.(([0-9]_*)+))?([eE][+-]?(([0-9]_*)+))?\\b"},{match:`\\b0x(${h})(\\.(${h}))?([pP][+-]?(([0-9]_*)+))?\\b`},{match:/\b0o([0-7]_*)+\b/},{match:/\b0b([01]_*)+\b/},]},E=(e="")=>({className:"subst",variants:[{match:m(/\\/,e,/[0\\tnr"']/)},{match:m(/\\/,e,/u\{[0-9a-fA-F]{1,8}\}/)},]}),$=(e="")=>({className:"subst",match:m(/\\/,e,/[\t ]*(?:[\r\n]|\r\n)/)}),y=(e="")=>({className:"subst",label:"interpol",begin:m(/\\/,e,/\(/),end:/\)/}),N=(e="")=>({begin:m(e,/"""/),end:m(/"""/,e),contains:[E(e),$(e),y(e)]}),w=(e="")=>({begin:m(e,/"/),end:m(/"/,e),contains:[E(e),y(e)]}),v={className:"string",variants:[N(),N("#"),N("##"),N("###"),w(),w("#"),w("##"),w("###"),]},x={match:m(/`/,eS,/`/)},k=[x,{className:"variable",match:/\$\d+/},{className:"variable",match:`\\$${eO}+`},],M=[{match:/(@|#(un)?)available/,className:"keyword",starts:{contains:[{begin:/\(/,end:/\)/,keywords:eT,contains:[...b,f,v]},]}},{className:"keyword",match:m(/@/,p(...eC))},{className:"meta",match:m(/@/,eS)},],O={match:g(/\b[A-Z]/),relevance:0,contains:[{className:"type",match:m(/(AV|CA|CF|CG|CI|CL|CM|CN|CT|MK|MP|MTK|MTL|NS|SCN|SK|UI|WK|XC)/,eO,"+")},{className:"type",match:eA,relevance:0},{match:/[?!]+/,relevance:0},{match:/\.\.\./,relevance:0},{match:m(/\s+&\s+/,g(eA)),relevance:0},]};O.contains.push({begin://,keywords:o,contains:[...a,...c,...M,u,O]});let S={begin:/\(/,end:/\)/,relevance:0,keywords:o,contains:["self",{match:m(eS,/\s*:/),keywords:"_|0",relevance:0},...a,...c,...d,...b,f,v,...k,...M,O,]},A={begin://,contains:[...a,O]},C={begin:/\(/,end:/\)/,keywords:o,contains:[{begin:p(g(m(eS,/\s*:/)),g(m(eS,/\s+/,eS,/\s*:/))),end:/:/,relevance:0,contains:[{className:"keyword",match:/\b_\b/},{className:"params",match:eS},]},...a,...c,...b,f,v,...M,O,S,],endsParent:!0,illegal:/["']/},T={match:[/func/,/\s+/,p(x.match,eS,ek)],className:{1:"keyword",3:"title.function"},contains:[A,C,n],illegal:[/\[/,/%/]};for(let R of v.variants){let D=R.contains.find(e=>"interpol"===e.label);D.keywords=o;let I=[...c,...d,...b,f,v,...k];D.contains=[...I,{begin:/\(/,end:/\)/,contains:["self",...I]},]}return{name:"Swift",keywords:o,contains:[...a,T,{match:[/\b(?:subscript|init[?!]?)/,/\s*(?=[<(])/],className:{1:"keyword"},contains:[A,C,n],illegal:/\[|%/},{beginKeywords:"struct protocol class extension enum actor",end:"\\{",excludeEnd:!0,keywords:o,contains:[e.inherit(e.TITLE_MODE,{className:"title.class",begin:/[A-Za-z$_][\u00C0-\u02B80-9A-Za-z$_]*/}),...c,]},{match:[/operator/,/\s+/,ek],className:{1:"keyword",3:"title"}},{begin:[/precedencegroup/,/\s+/,eA],className:{1:"keyword",3:"title"},contains:[O],keywords:[...ey,...e$],end:/}/},{beginKeywords:"import",end:/$/,contains:[...a],relevance:0},...c,...d,...b,f,v,...k,...M,O,S,]}},grmr_typescript(e){let n=em(e),t=["any","void","number","boolean","string","object","never","symbol","bigint","unknown",],a={beginKeywords:"namespace",end:/\{/,excludeEnd:!0,contains:[n.exports.CLASS_REFERENCE]},i={beginKeywords:"interface",end:/\{/,excludeEnd:!0,keywords:{keyword:"interface 
extends",built_in:t},contains:[n.exports.CLASS_REFERENCE]},r={$pattern:es,keyword:el.concat(["type","namespace","interface","public","private","protected","implements","declare","abstract","readonly","enum","override",]),literal:eo,built_in:eb.concat(t),"variable.language":eu},s={className:"meta",begin:"@[A-Za-z$_][0-9A-Za-z$_]*"},l=(e,n,t)=>{let a=e.contains.findIndex(e=>e.label===n);if(-1===a)throw Error("can not find mode to replace");e.contains.splice(a,1,t)};return Object.assign(n.keywords,r),n.exports.PARAMS_CONTAINS.push(s),n.contains=n.contains.concat([s,a,i]),l(n,"shebang",e.SHEBANG()),l(n,"use_strict",{className:"meta",relevance:10,begin:/^\s*['"]use strict['"]/}),n.contains.find(e=>"func.def"===e.label).relevance=0,Object.assign(n,{name:"TypeScript",aliases:["ts","tsx"]}),n},grmr_vbnet(e){let n=e.regex,t=/\d{1,2}\/\d{1,2}\/\d{4}/,a=/\d{4}-\d{1,2}-\d{1,2}/,i=/(\d|1[012])(:\d+){0,2} *(AM|PM)/,r=/\d{1,2}(:\d{1,2}){1,2}/,s={className:"literal",variants:[{begin:n.concat(/# */,n.either(a,t),/ *#/)},{begin:n.concat(/# */,r,/ *#/)},{begin:n.concat(/# */,i,/ *#/)},{begin:n.concat(/# */,n.either(a,t),/ +/,n.either(i,r),/ *#/)},]},l=e.COMMENT(/'''/,/$/,{contains:[{className:"doctag",begin:/<\/?/,end:/>/}]}),o=e.COMMENT(null,/$/,{variants:[{begin:/'/},{begin:/([\t ]|^)REM(?=\s)/}]});return{name:"Visual Basic .NET",aliases:["vb"],case_insensitive:!0,classNameAliases:{label:"symbol"},keywords:{keyword:"addhandler alias aggregate ansi as async assembly auto binary by byref byval call case catch class compare const continue custom declare default delegate dim distinct do each equals else elseif end enum erase error event exit explicit finally for friend from function get global goto group handles if implements imports in inherits interface into iterator join key let lib loop me mid module mustinherit mustoverride mybase myclass namespace narrowing new next notinheritable notoverridable of off on operator option optional order overloads overridable overrides paramarray partial preserve private property protected public raiseevent readonly redim removehandler resume return select set shadows shared skip static step stop structure strict sub synclock take text then throw to try unicode until using when where while widening with withevents writeonly yield",built_in:"addressof and andalso await directcast gettype getxmlnamespace is isfalse isnot istrue like mod nameof new not or orelse trycast typeof xor cbool cbyte cchar cdate cdbl cdec cint clng cobj csbyte cshort csng cstr cuint culng cushort",type:"boolean byte char date decimal double integer long object sbyte short single string uinteger ulong ushort",literal:"true false nothing"},illegal:"//|\\{|\\}|endif|gosub|variant|wend|^\\$ ",contains:[{className:"string",begin:/"(""|[^/n])"C\b/},{className:"string",begin:/"/,end:/"/,illegal:/\n/,contains:[{begin:/""/}]},s,{className:"number",relevance:0,variants:[{begin:/\b\d[\d_]*((\.[\d_]+(E[+-]?[\d_]+)?)|(E[+-]?[\d_]+))[RFD@!#]?/},{begin:/\b\d[\d_]*((U?[SIL])|[%&])?/},{begin:/&H[\dA-F_]+((U?[SIL])|[%&])?/},{begin:/&O[0-7_]+((U?[SIL])|[%&])?/},{begin:/&B[01_]+((U?[SIL])|[%&])?/},]},{className:"label",begin:/^\w+:/},l,o,{className:"meta",begin:/[\t ]*#(const|disable|else|elseif|enable|end|externalsource|if|region)\b/,end:/$/,keywords:{keyword:"const disable else elseif enable end externalsource if region then"},contains:[o]},]}},grmr_wasm(e){e.regex;let n=e.COMMENT(/\(;/,/;\)/);return 
n.contains.push("self"),{name:"WebAssembly",keywords:{$pattern:/[\w.]+/,keyword:["anyfunc","block","br","br_if","br_table","call","call_indirect","data","drop","elem","else","end","export","func","global.get","global.set","local.get","local.set","local.tee","get_global","get_local","global","if","import","local","loop","memory","memory.grow","memory.size","module","mut","nop","offset","param","result","return","select","set_global","set_local","start","table","tee_local","then","type","unreachable",]},contains:[e.COMMENT(/;;/,/$/),n,{match:[/(?:offset|align)/,/\s*/,/=/],className:{1:"keyword",3:"operator"}},{className:"variable",begin:/\$[\w_]+/},{match:/(\((?!;)|\))+/,className:"punctuation",relevance:0},{begin:[/(?:func|call|call_indirect)/,/\s+/,/\$[^\s)]+/],className:{1:"keyword",3:"title.function"}},e.QUOTE_STRING_MODE,{match:/(i32|i64|f32|f64)(?!\.)/,className:"type"},{className:"keyword",match:/\b(f32|f64|i32|i64)(?:\.(?:abs|add|and|ceil|clz|const|convert_[su]\/i(?:32|64)|copysign|ctz|demote\/f64|div(?:_[su])?|eqz?|extend_[su]\/i32|floor|ge(?:_[su])?|gt(?:_[su])?|le(?:_[su])?|load(?:(?:8|16|32)_[su])?|lt(?:_[su])?|max|min|mul|nearest|neg?|or|popcnt|promote\/f32|reinterpret\/[fi](?:32|64)|rem_[su]|rot[lr]|shl|shr_[su]|store(?:8|16|32)?|sqrt|sub|trunc(?:_[su]\/f(?:32|64))?|wrap\/i64|xor))\b/},{className:"number",relevance:0,match:/[+-]?\b(?:\d(?:_?\d)*(?:\.\d(?:_?\d)*)?(?:[eE][+-]?\d(?:_?\d)*)?|0x[\da-fA-F](?:_?[\da-fA-F])*(?:\.[\da-fA-F](?:_?[\da-fA-D])*)?(?:[pP][+-]?\d(?:_?\d)*)?)\b|\binf\b|\bnan(?::0x[\da-fA-F](?:_?[\da-fA-D])*)?\b/},]}},grmr_yaml(e){let n="true false yes no null",t="[\\w#;/?:@&=+$,.~*'()[\\]]+",a={className:"string",relevance:0,variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/\S+/},],contains:[e.BACKSLASH_ESCAPE,{className:"template-variable",variants:[{begin:/\{\{/,end:/\}\}/},{begin:/%\{/,end:/\}/},]},]},i=e.inherit(a,{variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/[^\s,{}[\]]+/},]}),r={end:",",endsWithParent:!0,excludeEnd:!0,keywords:n,relevance:0},s=[{className:"attr",variants:[{begin:"\\w[\\w :\\/.-]*:(?=[ ]|$)"},{begin:'"\\w[\\w :\\/.-]*":(?=[ ]|$)'},{begin:"'\\w[\\w :\\/.-]*':(?=[ ]|$)"},]},{className:"meta",begin:"^---\\s*$",relevance:10},{className:"string",begin:"[\\|>]([1-9]?[+-])?[ ]*\\n( +)[^ ][^\\n]*\\n(\\2[^\\n]+\\n?)*"},{begin:"<%[%=-]?",end:"[%-]?%>",subLanguage:"ruby",excludeBegin:!0,excludeEnd:!0,relevance:0},{className:"type",begin:"!\\w+!"+t},{className:"type",begin:"!<"+t+">"},{className:"type",begin:"!"+t},{className:"type",begin:"!!"+t},{className:"meta",begin:"&"+e.UNDERSCORE_IDENT_RE+"$"},{className:"meta",begin:"\\*"+e.UNDERSCORE_IDENT_RE+"$"},{className:"bullet",begin:"-(?=[ ]|$)",relevance:0},e.HASH_COMMENT_MODE,{beginKeywords:n,keywords:{literal:n}},{className:"number",begin:"\\b[0-9]{4}(-[0-9][0-9]){0,2}([Tt \\t][0-9][0-9]?(:[0-9][0-9]){2})?(\\.[0-9]*)?([ \\t])*(Z|[-+][0-9][0-9]?(:[0-9][0-9])?)?\\b"},{className:"number",begin:e.C_NUMBER_RE+"\\b",relevance:0},{begin:/\{/,end:/\}/,contains:[r],illegal:"\\n",relevance:0},{begin:"\\[",end:"\\]",contains:[r],illegal:"\\n",relevance:0},a,],l=[...s];return l.pop(),l.push(i),r.contains=l,{name:"YAML",case_insensitive:!0,aliases:["yml"],contains:s}}});let eD=Q;for(let eI of Object.keys(eR)){let eL=eI.replace("grmr_","").replace("_","-");eD.registerLanguage(eL,eR[eI])}return eD}();"object"==typeof exports&&"undefined"!=typeof module&&(module.exports=hljs); \ No newline at end of file diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/models.ts 
b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/models.ts deleted file mode 100644 index 181288d8d6a2cc1a852c9087dcbb8a3392e70051..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/models.ts +++ /dev/null @@ -1,4 +0,0 @@ -import type { Model } from "$lib/types/Model"; - -export const findCurrentModel = (models: Model[], id?: string) => - models.find((m) => m.id === id) ?? models[0]; diff --git a/spaces/AgentVerse/agentVerse/agentverse/agents/tasksolving_agent/evaluator.py b/spaces/AgentVerse/agentVerse/agentverse/agents/tasksolving_agent/evaluator.py deleted file mode 100644 index abd40642b8dd04397f74b73a420588affad0556f..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/agents/tasksolving_agent/evaluator.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import annotations - -import asyncio -from colorama import Fore - -from agentverse.logging import get_logger -import bdb -from string import Template -from typing import TYPE_CHECKING, List, Tuple - -from agentverse.message import EvaluatorMessage, Message - -from agentverse.agents import agent_registry -from agentverse.agents.base import BaseAgent - - -logger = get_logger() - - -@agent_registry.register("evaluator") -class EvaluatorAgent(BaseAgent): - def step( - self, - solution: str, - result: str, - task_description: str, - all_role_description: str, - ) -> EvaluatorMessage: - logger.debug("", self.name, Fore.MAGENTA) - prepend_prompt, append_prompt = self.get_all_prompts( - solution=solution, - result=result, - task_description=task_description, - all_role_description=all_role_description, - ) - history = self.memory.to_messages(self.name) - parsed_response = None - for i in range(self.max_retry): - try: - response = self.llm.generate_response( - prepend_prompt, history, append_prompt - ) - parsed_response = self.output_parser.parse(response) - break - except (KeyboardInterrupt, bdb.BdbQuit): - raise - except Exception as e: - logger.error(e) - logger.warn("Retrying...") - continue - - if parsed_response is None: - logger.error(f"{self.name} failed to generate valid response.") - message = EvaluatorMessage( - sender=self.name, - sender_agent=self, - score=parsed_response[0] if parsed_response is not None else 0, - advice=parsed_response[1] if parsed_response is not None else "", - ) - return message - # return parsed_response - - async def astep(self, solution: str) -> EvaluatorMessage: - """Asynchronous version of step""" - pass - - def _fill_prompt_template(self, solution: str, task_description: str) -> str: - """Fill the placeholders in the prompt template - - In the role_assigner agent, three placeholders are supported: - - ${task_description} - - ${solution} - """ - input_arguments = { - "task_description": task_description, - "solution": solution, - } - return Template(self.prompt_template).safe_substitute(input_arguments) - - def add_message_to_memory(self, messages: List[Message]) -> None: - self.memory.add_message(messages) - - def reset(self) -> None: - """Reset the agent""" - self.memory.reset() - # TODO: reset receiver diff --git a/spaces/AlexWang/lama/fetch_data/places_standard_train_prepare.sh b/spaces/AlexWang/lama/fetch_data/places_standard_train_prepare.sh deleted file mode 100644 index b5389e7096bade08526162733658e221808716fd..0000000000000000000000000000000000000000 --- a/spaces/AlexWang/lama/fetch_data/places_standard_train_prepare.sh +++ /dev/null @@ -1,16 +0,0 @@ -mkdir -p places_standard_dataset/train - -# untar without 
folder structure -tar -xvf train_large_places365standard.tar --transform='s/.*\///' -C places_standard_dataset/train - -# create location config places.yaml -PWD=$(pwd) -DATASET=${PWD}/places_standard_dataset -PLACES=${PWD}/configs/training/location/places_standard.yaml - -touch $PLACES -echo "# @package _group_" >> $PLACES -echo "data_root_dir: ${DATASET}/" >> $PLACES -echo "out_root_dir: ${PWD}/experiments/" >> $PLACES -echo "tb_dir: ${PWD}/tb_logs/" >> $PLACES -echo "pretrained_models: ${PWD}/" >> $PLACES diff --git a/spaces/Aloento/9Nine-PITS/text/frontend/zh_normalization/num.py b/spaces/Aloento/9Nine-PITS/text/frontend/zh_normalization/num.py deleted file mode 100644 index 74f4595248ff0b51f4abe4ed991742445baf4fc2..0000000000000000000000000000000000000000 --- a/spaces/Aloento/9Nine-PITS/text/frontend/zh_normalization/num.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Rules to verbalize numbers into Chinese characters. -https://zh.wikipedia.org/wiki/中文数字#現代中文 -""" -import re -from collections import OrderedDict -from typing import List - -DIGITS = {str(i): tran for i, tran in enumerate('零一二三四五六七八九')} -UNITS = OrderedDict({ - 1: '十', - 2: '百', - 3: '千', - 4: '万', - 8: '亿', -}) - -COM_QUANTIFIERS = '(所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' - -# 分数表达式 -RE_FRAC = re.compile(r'(-?)(\d+)/(\d+)') - - -def replace_frac(match) -> str: - """ - Args: - match (re.Match) - Returns: - str - """ - sign = match.group(1) - nominator = match.group(2) - denominator = match.group(3) - sign: str = "负" if sign else "" - nominator: str = num2str(nominator) - denominator: str = num2str(denominator) - result = f"{sign}{denominator}分之{nominator}" - return result - - -# 百分数表达式 -RE_PERCENTAGE = re.compile(r'(-?)(\d+(\.\d+)?)%') - - -def replace_percentage(match) -> str: - """ - Args: - match (re.Match) - Returns: - str - """ - sign = match.group(1) - percent = match.group(2) - sign: str = "负" if sign else "" - percent: str = num2str(percent) - result = f"{sign}百分之{percent}" - return result - - -# 整数表达式 -# 带负号的整数 -10 -RE_INTEGER = re.compile(r'(-)' r'(\d+)') - - -def replace_negative_num(match) -> str: - """ - Args: - match (re.Match) - Returns: - str - """ - sign = match.group(1) - number = match.group(2) - sign: str = "负" if sign else "" - number: str = num2str(number) - result = f"{sign}{number}" - return result - - -# 编号-无符号整形 -# 00078 -RE_DEFAULT_NUM = re.compile(r'\d{3}\d*') - - -def replace_default_num(match): - """ - Args: - match (re.Match) - Returns: - str - """ - number = match.group(0) - return 
verbalize_digit(number) - - -# 数字表达式 -# 纯小数 -RE_DECIMAL_NUM = re.compile(r'(-?)((\d+)(\.\d+))' r'|(\.(\d+))') -# 正整数 + 量词 -RE_POSITIVE_QUANTIFIERS = re.compile(r"(\d+)([多余几\+])?" + COM_QUANTIFIERS) -RE_NUMBER = re.compile(r'(-?)((\d+)(\.\d+)?)' r'|(\.(\d+))') - - -def replace_positive_quantifier(match) -> str: - """ - Args: - match (re.Match) - Returns: - str - """ - number = match.group(1) - match_2 = match.group(2) - if match_2 == "+": - match_2 = "多" - match_2: str = match_2 if match_2 else "" - quantifiers: str = match.group(3) - number: str = num2str(number) - result = f"{number}{match_2}{quantifiers}" - return result - - -def replace_number(match) -> str: - """ - Args: - match (re.Match) - Returns: - str - """ - sign = match.group(1) - number = match.group(2) - pure_decimal = match.group(5) - if pure_decimal: - result = num2str(pure_decimal) - else: - sign: str = "负" if sign else "" - number: str = num2str(number) - result = f"{sign}{number}" - return result - - -# 范围表达式 -# match.group(1) and match.group(8) are copy from RE_NUMBER - -RE_RANGE = re.compile( - r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))[-~]((-?)((\d+)(\.\d+)?)|(\.(\d+)))') - - -def replace_range(match) -> str: - """ - Args: - match (re.Match) - Returns: - str - """ - first, second = match.group(1), match.group(8) - first = RE_NUMBER.sub(replace_number, first) - second = RE_NUMBER.sub(replace_number, second) - result = f"{first}到{second}" - return result - - -def _get_value(value_string: str, use_zero: bool = True) -> List[str]: - stripped = value_string.lstrip('0') - if len(stripped) == 0: - return [] - elif len(stripped) == 1: - if use_zero and len(stripped) < len(value_string): - return [DIGITS['0'], DIGITS[stripped]] - else: - return [DIGITS[stripped]] - else: - largest_unit = next( - power for power in reversed(UNITS.keys()) if power < len(stripped)) - first_part = value_string[:-largest_unit] - second_part = value_string[-largest_unit:] - return _get_value(first_part) + [UNITS[largest_unit]] + _get_value( - second_part) - - -def verbalize_cardinal(value_string: str) -> str: - if not value_string: - return '' - - # 000 -> '零' , 0 -> '零' - value_string = value_string.lstrip('0') - if len(value_string) == 0: - return DIGITS['0'] - - result_symbols = _get_value(value_string) - # verbalized number starting with '一十*' is abbreviated as `十*` - if len(result_symbols) >= 2 and result_symbols[0] == DIGITS[ - '1'] and result_symbols[1] == UNITS[1]: - result_symbols = result_symbols[1:] - return ''.join(result_symbols) - - -def verbalize_digit(value_string: str, alt_one=False) -> str: - result_symbols = [DIGITS[digit] for digit in value_string] - result = ''.join(result_symbols) - if alt_one: - result = result.replace("一", "幺") - return result - - -def num2str(value_string: str) -> str: - integer_decimal = value_string.split('.') - if len(integer_decimal) == 1: - integer = integer_decimal[0] - decimal = '' - elif len(integer_decimal) == 2: - integer, decimal = integer_decimal - else: - raise ValueError( - f"The value string: '${value_string}' has more than one point in it." 
- ) - - result = verbalize_cardinal(integer) - - decimal = decimal.rstrip('0') - if decimal: - # '.22' is verbalized as '零点二二' - # '3.20' is verbalized as '三点二 - result = result if result else "零" - result += '点' + verbalize_digit(decimal) - return result diff --git a/spaces/Amrrs/DragGan-Inversion/torch_utils/misc.py b/spaces/Amrrs/DragGan-Inversion/torch_utils/misc.py deleted file mode 100644 index d67d234396ca97b72d8549184fd1d2252bab466d..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/torch_utils/misc.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import re -import contextlib -import numpy as np -import torch -import warnings -import dnnlib - -# ---------------------------------------------------------------------------- -# Cached construction of constant tensors. Avoids CPU=>GPU copy when the -# same constant is used multiple times. - -_constant_cache = dict() - - -def constant(value, shape=None, dtype=None, device=None, memory_format=None): - value = np.asarray(value) - if shape is not None: - shape = tuple(shape) - if dtype is None: - dtype = torch.get_default_dtype() - if device is None: - device = torch.device('cpu') - if memory_format is None: - memory_format = torch.contiguous_format - - key = (value.shape, value.dtype, value.tobytes(), - shape, dtype, device, memory_format) - tensor = _constant_cache.get(key, None) - if tensor is None: - tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device) - if shape is not None: - tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape)) - tensor = tensor.contiguous(memory_format=memory_format) - _constant_cache[key] = tensor - return tensor - -# ---------------------------------------------------------------------------- -# Replace NaN/Inf with specified numerical values. - - -try: - nan_to_num = torch.nan_to_num # 1.8.0a0 -except AttributeError: - def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin - assert isinstance(input, torch.Tensor) - if posinf is None: - posinf = torch.finfo(input.dtype).max - if neginf is None: - neginf = torch.finfo(input.dtype).min - assert nan == 0 - return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out) - -# ---------------------------------------------------------------------------- -# Symbolic assert. - -try: - symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access -except AttributeError: - symbolic_assert = torch.Assert # 1.7.0 - -# ---------------------------------------------------------------------------- -# Context manager to temporarily suppress known warnings in torch.jit.trace(). -# Note: Cannot use catch_warnings because of https://bugs.python.org/issue29672 - - -@contextlib.contextmanager -def suppress_tracer_warnings(): - flt = ('ignore', None, torch.jit.TracerWarning, None, 0) - warnings.filters.insert(0, flt) - yield - warnings.filters.remove(flt) - -# ---------------------------------------------------------------------------- -# Assert that the shape of a tensor matches the given list of integers. 
-# None indicates that the size of a dimension is allowed to vary. -# Performs symbolic assertion when used in torch.jit.trace(). - - -def assert_shape(tensor, ref_shape): - if tensor.ndim != len(ref_shape): - raise AssertionError( - f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}') - for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)): - if ref_size is None: - pass - elif isinstance(ref_size, torch.Tensor): - with suppress_tracer_warnings(): # as_tensor results are registered as constants - symbolic_assert(torch.equal(torch.as_tensor( - size), ref_size), f'Wrong size for dimension {idx}') - elif isinstance(size, torch.Tensor): - with suppress_tracer_warnings(): # as_tensor results are registered as constants - symbolic_assert(torch.equal(size, torch.as_tensor( - ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}') - elif size != ref_size: - raise AssertionError( - f'Wrong size for dimension {idx}: got {size}, expected {ref_size}') - -# ---------------------------------------------------------------------------- -# Function decorator that calls torch.autograd.profiler.record_function(). - - -def profiled_function(fn): - def decorator(*args, **kwargs): - with torch.autograd.profiler.record_function(fn.__name__): - return fn(*args, **kwargs) - decorator.__name__ = fn.__name__ - return decorator - -# ---------------------------------------------------------------------------- -# Sampler for torch.utils.data.DataLoader that loops over the dataset -# indefinitely, shuffling items as it goes. - - -class InfiniteSampler(torch.utils.data.Sampler): - def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5): - assert len(dataset) > 0 - assert num_replicas > 0 - assert 0 <= rank < num_replicas - assert 0 <= window_size <= 1 - super().__init__(dataset) - self.dataset = dataset - self.rank = rank - self.num_replicas = num_replicas - self.shuffle = shuffle - self.seed = seed - self.window_size = window_size - - def __iter__(self): - order = np.arange(len(self.dataset)) - rnd = None - window = 0 - if self.shuffle: - rnd = np.random.RandomState(self.seed) - rnd.shuffle(order) - window = int(np.rint(order.size * self.window_size)) - - idx = 0 - while True: - i = idx % order.size - if idx % self.num_replicas == self.rank: - yield order[i] - if window >= 2: - j = (i - rnd.randint(window)) % order.size - order[i], order[j] = order[j], order[i] - idx += 1 - -# ---------------------------------------------------------------------------- -# Utilities for operating with torch.nn.Module parameters and buffers. 
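-# The helpers below deliberately treat parameters and buffers as a single group, so that
-# weight copying and DDP consistency checks cover both kinds of tensors.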
- - -def params_and_buffers(module): - assert isinstance(module, torch.nn.Module) - return list(module.parameters()) + list(module.buffers()) - - -def named_params_and_buffers(module): - assert isinstance(module, torch.nn.Module) - return list(module.named_parameters()) + list(module.named_buffers()) - - -def copy_params_and_buffers(src_module, dst_module, require_all=False): - assert isinstance(src_module, torch.nn.Module) - assert isinstance(dst_module, torch.nn.Module) - src_tensors = dict(named_params_and_buffers(src_module)) - for name, tensor in named_params_and_buffers(dst_module): - assert (name in src_tensors) or (not require_all) - if name in src_tensors: - tensor.copy_(src_tensors[name].detach()).requires_grad_( - tensor.requires_grad) - -# ---------------------------------------------------------------------------- -# Context manager for easily enabling/disabling DistributedDataParallel -# synchronization. - - -@contextlib.contextmanager -def ddp_sync(module, sync): - assert isinstance(module, torch.nn.Module) - if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel): - yield - else: - with module.no_sync(): - yield - -# ---------------------------------------------------------------------------- -# Check DistributedDataParallel consistency across processes. - - -def check_ddp_consistency(module, ignore_regex=None): - assert isinstance(module, torch.nn.Module) - for name, tensor in named_params_and_buffers(module): - fullname = type(module).__name__ + '.' + name - if ignore_regex is not None and re.fullmatch(ignore_regex, fullname): - continue - tensor = tensor.detach() - if tensor.is_floating_point(): - tensor = nan_to_num(tensor) - other = tensor.clone() - torch.distributed.broadcast(tensor=other, src=0) - assert (tensor == other).all(), fullname - -# ---------------------------------------------------------------------------- -# Print summary table of module hierarchy. - - -def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True): - assert isinstance(module, torch.nn.Module) - assert not isinstance(module, torch.jit.ScriptModule) - assert isinstance(inputs, (tuple, list)) - - # Register hooks. - entries = [] - nesting = [0] - - def pre_hook(_mod, _inputs): - nesting[0] += 1 - - def post_hook(mod, _inputs, outputs): - nesting[0] -= 1 - if nesting[0] <= max_nesting: - outputs = list(outputs) if isinstance( - outputs, (tuple, list)) else [outputs] - outputs = [t for t in outputs if isinstance(t, torch.Tensor)] - entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs)) - hooks = [mod.register_forward_pre_hook( - pre_hook) for mod in module.modules()] - hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()] - - # Run module. - outputs = module(*inputs) - for hook in hooks: - hook.remove() - - # Identify unique outputs, parameters, and buffers. - tensors_seen = set() - for e in entries: - e.unique_params = [ - t for t in e.mod.parameters() if id(t) not in tensors_seen] - e.unique_buffers = [ - t for t in e.mod.buffers() if id(t) not in tensors_seen] - e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen] - tensors_seen |= {id(t) for t in e.unique_params + - e.unique_buffers + e.unique_outputs} - - # Filter out redundant entries. - if skip_redundant: - entries = [e for e in entries if len(e.unique_params) or len( - e.unique_buffers) or len(e.unique_outputs)] - - # Construct table. 
- rows = [[type(module).__name__, 'Parameters', - 'Buffers', 'Output shape', 'Datatype']] - rows += [['---'] * len(rows[0])] - param_total = 0 - buffer_total = 0 - submodule_names = {mod: name for name, mod in module.named_modules()} - for e in entries: - name = '' if e.mod is module else submodule_names[e.mod] - param_size = sum(t.numel() for t in e.unique_params) - buffer_size = sum(t.numel() for t in e.unique_buffers) - output_shapes = [str(list(t.shape)) for t in e.outputs] - output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs] - rows += [[ - name + (':0' if len(e.outputs) >= 2 else ''), - str(param_size) if param_size else '-', - str(buffer_size) if buffer_size else '-', - (output_shapes + ['-'])[0], - (output_dtypes + ['-'])[0], - ]] - for idx in range(1, len(e.outputs)): - rows += [[name + f':{idx}', '-', '-', - output_shapes[idx], output_dtypes[idx]]] - param_total += param_size - buffer_total += buffer_size - rows += [['---'] * len(rows[0])] - rows += [['Total', str(param_total), str(buffer_total), '-', '-']] - - # Print table. - widths = [max(len(cell) for cell in column) for column in zip(*rows)] - print() - for row in rows: - print(' '.join(cell + ' ' * (width - len(cell)) - for cell, width in zip(row, widths))) - print() - return outputs - -# ---------------------------------------------------------------------------- diff --git a/spaces/Amrrs/DragGan-Inversion/visualizer_drag_gradio.py b/spaces/Amrrs/DragGan-Inversion/visualizer_drag_gradio.py deleted file mode 100644 index a4e14e9b81e21325a38e99064a755b24f15afac4..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/visualizer_drag_gradio.py +++ /dev/null @@ -1,934 +0,0 @@ -# https://huggingface.co/DragGan/DragGan-Models -# https://arxiv.org/abs/2305.10973 -import os -import os.path as osp -from argparse import ArgumentParser -from functools import partial -from pathlib import Path -import time - -import psutil - -import gradio as gr -import numpy as np -import torch -from PIL import Image - -import dnnlib -from gradio_utils import (ImageMask, draw_mask_on_image, draw_points_on_image, - get_latest_points_pair, get_valid_mask, - on_change_single_global_state) -from viz.renderer import Renderer, add_watermark_np - - -# download models from Hugging Face hub -from huggingface_hub import snapshot_download - -model_dir = Path('./checkpoints') -snapshot_download('DragGan/DragGan-Models', - repo_type='model', local_dir=model_dir) - -cache_dir = model_dir - -device = 'cuda' -IS_SPACE = "DragGan/DragGan" in os.environ.get('SPACE_ID', '') -TIMEOUT = 80 - - -def reverse_point_pairs(points): - new_points = [] - for p in points: - new_points.append([p[1], p[0]]) - return new_points - - -def clear_state(global_state, target=None): - """Clear target history state from global_state - If target is not defined, points and mask will be both removed. - 1. set global_state['points'] as empty dict - 2. set global_state['mask'] as full-one mask. - """ - if target is None: - target = ['point', 'mask'] - if not isinstance(target, list): - target = [target] - if 'point' in target: - global_state['points'] = dict() - print('Clear Points State!') - if 'mask' in target: - image_raw = global_state["images"]["image_raw"] - global_state['mask'] = np.ones((image_raw.size[1], image_raw.size[0]), - dtype=np.uint8) - print('Clear mask State!') - - return global_state - - -def init_images(global_state): - """This function is called only ones with Gradio App is started. - 0. 
pre-process global_state, unpack value from global_state of need - 1. Re-init renderer - 2. run `renderer._render_drag_impl` with `is_drag=False` to generate - new image - 3. Assign images to global state and re-generate mask - """ - - if isinstance(global_state, gr.State): - state = global_state.value - else: - state = global_state - - state['renderer'].init_network( - state['generator_params'], # res - valid_checkpoints_dict[state['pretrained_weight']], # pkl - state['params']['seed'], # w0_seed, - None, # w_load - state['params']['latent_space'] == 'w+', # w_plus - 'const', - state['params']['trunc_psi'], # trunc_psi, - state['params']['trunc_cutoff'], # trunc_cutoff, - None, # input_transform - state['params']['lr'] # lr, - ) - - state['renderer']._render_drag_impl(state['generator_params'], - is_drag=False, - to_pil=True) - - init_image = state['generator_params'].image - state['images']['image_orig'] = init_image - state['images']['image_raw'] = init_image - state['images']['image_show'] = Image.fromarray( - add_watermark_np(np.array(init_image))) - state['mask'] = np.ones((init_image.size[1], init_image.size[0]), - dtype=np.uint8) - return global_state - - -def update_image_draw(image, points, mask, show_mask, global_state=None): - - image_draw = draw_points_on_image(image, points) - if show_mask and mask is not None and not (mask == 0).all() and not ( - mask == 1).all(): - image_draw = draw_mask_on_image(image_draw, mask) - - image_draw = Image.fromarray(add_watermark_np(np.array(image_draw))) - if global_state is not None: - global_state['images']['image_show'] = image_draw - return image_draw - - -def preprocess_mask_info(global_state, image): - """Function to handle mask information. - 1. last_mask is None: Do not need to change mask, return mask - 2. last_mask is not None: - 2.1 global_state is remove_mask: - 2.2 global_state is add_mask: - """ - if isinstance(image, dict): - last_mask = get_valid_mask(image['mask']) - else: - last_mask = None - mask = global_state['mask'] - - # mask in global state is a placeholder with all 1. 
- if (mask == 1).all(): - mask = last_mask - - # last_mask = global_state['last_mask'] - editing_mode = global_state['editing_state'] - - if last_mask is None: - return global_state - - if editing_mode == 'remove_mask': - updated_mask = np.clip(mask - last_mask, 0, 1) - print(f'Last editing_state is {editing_mode}, do remove.') - elif editing_mode == 'add_mask': - updated_mask = np.clip(mask + last_mask, 0, 1) - print(f'Last editing_state is {editing_mode}, do add.') - else: - updated_mask = mask - print(f'Last editing_state is {editing_mode}, ' - 'do nothing to mask.') - - global_state['mask'] = updated_mask - # global_state['last_mask'] = None # clear buffer - return global_state - - -def print_memory_usage(): - # Print system memory usage - print(f"System memory usage: {psutil.virtual_memory().percent}%") - - # Print GPU memory usage - if torch.cuda.is_available(): - device = torch.device("cuda") - print(f"GPU memory usage: {torch.cuda.memory_allocated() / 1e9} GB") - print( - f"Max GPU memory usage: {torch.cuda.max_memory_allocated() / 1e9} GB") - device_properties = torch.cuda.get_device_properties(device) - available_memory = device_properties.total_memory - \ - torch.cuda.max_memory_allocated() - print(f"Available GPU memory: {available_memory / 1e9} GB") - else: - print("No GPU available") - - -# filter large models running on SPACES -allowed_checkpoints = [] # all checkpoints -if IS_SPACE: - allowed_checkpoints = ["stylegan_human_v2_512.pkl", - "stylegan2_dogs_1024_pytorch.pkl"] - -valid_checkpoints_dict = { - f.name.split('.')[0]: str(f) - for f in Path(cache_dir).glob('*.pkl') - if f.name in allowed_checkpoints or not IS_SPACE -} -print('Valid checkpoint file:') -print(valid_checkpoints_dict) - -init_pkl = 'stylegan_human_v2_512' - -with gr.Blocks() as app: - gr.Markdown(""" -# DragGAN - Drag Your GAN -## Interactive Point-based Manipulation on the Generative Image Manifold -### Unofficial Gradio Demo - -**Due to high demand, only one model can be run at a time, or you can duplicate the space and run your own copy.** - - -Duplicate Space for no queue on your own hardware.

    - -* Official Repo: [XingangPan](https://github.com/XingangPan/DragGAN) -* Gradio Demo by: [LeoXing1996](https://github.com/LeoXing1996) © [OpenMMLab MMagic](https://github.com/open-mmlab/mmagic) -""") - - # renderer = Renderer() - global_state = gr.State({ - "images": { - # image_orig: the original image, change with seed/model is changed - # image_raw: image with mask and points, change durning optimization - # image_show: image showed on screen - }, - "temporal_params": { - # stop - }, - 'mask': - None, # mask for visualization, 1 for editing and 0 for unchange - 'last_mask': None, # last edited mask - 'show_mask': True, # add button - "generator_params": dnnlib.EasyDict(), - "params": { - "seed": int(np.random.randint(0, 2**32 - 1)), - "motion_lambda": 20, - "r1_in_pixels": 3, - "r2_in_pixels": 12, - "magnitude_direction_in_pixels": 1.0, - "latent_space": "w+", - "trunc_psi": 0.7, - "trunc_cutoff": None, - "lr": 0.001, - }, - "device": device, - "draw_interval": 1, - "renderer": Renderer(disable_timing=True), - "points": {}, - "curr_point": None, - "curr_type_point": "start", - 'editing_state': 'add_points', - 'pretrained_weight': init_pkl - }) - - # init image - global_state = init_images(global_state) - with gr.Row(): - - with gr.Row(): - - # Left --> tools - with gr.Column(scale=3): - - # Pickle - with gr.Row(): - - with gr.Column(scale=1, min_width=10): - gr.Markdown(value='Pickle', show_label=False) - - with gr.Column(scale=4, min_width=10): - form_pretrained_dropdown = gr.Dropdown( - choices=list(valid_checkpoints_dict.keys()), - label="Pretrained Model", - value=init_pkl, - ) - - # Latent - with gr.Row(): - with gr.Column(scale=1, min_width=10): - gr.Markdown(value='Latent', show_label=False) - - with gr.Column(scale=4, min_width=10): - form_seed_number = gr.Slider( - mininium=0, - maximum=2**32-1, - step=1, - value=global_state.value['params']['seed'], - interactive=True, - # randomize=True, - label="Seed", - ) - form_lr_number = gr.Number( - value=global_state.value["params"]["lr"], - interactive=True, - label="Step Size") - - with gr.Row(): - with gr.Column(scale=2, min_width=10): - form_reset_image = gr.Button("Reset Image") - with gr.Column(scale=3, min_width=10): - form_latent_space = gr.Radio( - ['w', 'w+'], - value=global_state.value['params'] - ['latent_space'], - interactive=True, - label='Latent space to optimize', - show_label=False, - ) - - # Drag - with gr.Row(): - with gr.Column(scale=1, min_width=10): - gr.Markdown(value='Drag', show_label=False) - with gr.Column(scale=4, min_width=10): - with gr.Row(): - with gr.Column(scale=1, min_width=10): - enable_add_points = gr.Button('Add Points') - with gr.Column(scale=1, min_width=10): - undo_points = gr.Button('Reset Points') - with gr.Row(): - with gr.Column(scale=1, min_width=10): - form_start_btn = gr.Button("Start") - with gr.Column(scale=1, min_width=10): - form_stop_btn = gr.Button("Stop") - - form_steps_number = gr.Number(value=0, - label="Steps", - interactive=False) - - # Mask - with gr.Row(): - with gr.Column(scale=1, min_width=10): - gr.Markdown(value='Mask', show_label=False) - with gr.Column(scale=4, min_width=10): - enable_add_mask = gr.Button('Edit Flexible Area') - with gr.Row(): - with gr.Column(scale=1, min_width=10): - form_reset_mask_btn = gr.Button("Reset mask") - with gr.Column(scale=1, min_width=10): - show_mask = gr.Checkbox( - label='Show Mask', - value=global_state.value['show_mask'], - show_label=False) - - with gr.Row(): - form_lambda_number = gr.Number( - 
value=global_state.value["params"] - ["motion_lambda"], - interactive=True, - label="Lambda", - ) - - form_draw_interval_number = gr.Number( - value=global_state.value["draw_interval"], - label="Draw Interval (steps)", - interactive=True, - visible=False) - - # Right --> Image - with gr.Column(scale=8): - form_image = ImageMask( - value=global_state.value['images']['image_show'], - brush_radius=20).style( - width=768, - height=768) # NOTE: hard image size code here. - gr.Markdown(""" - ## Quick Start - - 1. Select desired `Pretrained Model` and adjust `Seed` to generate an - initial image. - 2. Click on image to add control points. - 3. Click `Start` and enjoy it! - - ## Advance Usage - - 1. Change `Step Size` to adjust learning rate in drag optimization. - 2. Select `w` or `w+` to change latent space to optimize: - * Optimize on `w` space may cause greater influence to the image. - * Optimize on `w+` space may work slower than `w`, but usually achieve - better results. - * Note that changing the latent space will reset the image, points and - mask (this has the same effect as `Reset Image` button). - 3. Click `Edit Flexible Area` to create a mask and constrain the - unmasked region to remain unchanged. - - - """) - gr.HTML(""" - -
    - Gradio demo supported by - - OpenMMLab MMagic -
    - """) - # Network & latents tab listeners - - def on_change_pretrained_dropdown(pretrained_value, global_state): - """Function to handle model change. - 1. Set pretrained value to global_state - 2. Re-init images and clear all states - """ - - global_state['pretrained_weight'] = pretrained_value - init_images(global_state) - clear_state(global_state) - - return global_state, global_state["images"]['image_show'] - - form_pretrained_dropdown.change( - on_change_pretrained_dropdown, - inputs=[form_pretrained_dropdown, global_state], - outputs=[global_state, form_image], - queue=True, - ) - - def on_click_reset_image(global_state): - """Reset image to the original one and clear all states - 1. Re-init images - 2. Clear all states - """ - - init_images(global_state) - clear_state(global_state) - - return global_state, global_state['images']['image_show'] - - form_reset_image.click( - on_click_reset_image, - inputs=[global_state], - outputs=[global_state, form_image], - queue=False, - ) - - # Update parameters - def on_change_update_image_seed(seed, global_state): - """Function to handle generation seed change. - 1. Set seed to global_state - 2. Re-init images and clear all states - """ - - global_state["params"]["seed"] = int(seed) - init_images(global_state) - clear_state(global_state) - - return global_state, global_state['images']['image_show'] - - form_seed_number.change( - on_change_update_image_seed, - inputs=[form_seed_number, global_state], - outputs=[global_state, form_image], - ) - - def on_click_latent_space(latent_space, global_state): - """Function to reset latent space to optimize. - NOTE: this function we reset the image and all controls - 1. Set latent-space to global_state - 2. Re-init images and clear all state - """ - - global_state['params']['latent_space'] = latent_space - init_images(global_state) - clear_state(global_state) - - return global_state, global_state['images']['image_show'] - - form_latent_space.change(on_click_latent_space, - inputs=[form_latent_space, global_state], - outputs=[global_state, form_image]) - - # ==== Params - form_lambda_number.change( - partial(on_change_single_global_state, ["params", "motion_lambda"]), - inputs=[form_lambda_number, global_state], - outputs=[global_state], - ) - - def on_change_lr(lr, global_state): - if lr == 0: - print('lr is 0, do nothing.') - return global_state - else: - global_state["params"]["lr"] = lr - renderer = global_state['renderer'] - renderer.update_lr(lr) - print('New optimizer: ') - print(renderer.w_optim) - return global_state - - form_lr_number.change( - on_change_lr, - inputs=[form_lr_number, global_state], - outputs=[global_state], - queue=False, - ) - - def on_click_start(global_state, image): - p_in_pixels = [] - t_in_pixels = [] - valid_points = [] - - # handle of start drag in mask editing mode - global_state = preprocess_mask_info(global_state, image) - - # Prepare the points for the inference - if len(global_state["points"]) == 0: - # yield on_click_start_wo_points(global_state, image) - image_raw = global_state['images']['image_raw'] - update_image_draw( - image_raw, - global_state['points'], - global_state['mask'], - global_state['show_mask'], - global_state, - ) - - yield ( - global_state, - 0, - global_state['images']['image_show'], - # gr.File.update(visible=False), - gr.Button.update(interactive=True), - gr.Button.update(interactive=True), - gr.Button.update(interactive=True), - gr.Button.update(interactive=True), - gr.Button.update(interactive=True), - # latent space - 
gr.Radio.update(interactive=True), - gr.Button.update(interactive=True), - # NOTE: disable stop button - gr.Button.update(interactive=False), - - # update other comps - gr.Dropdown.update(interactive=True), - gr.Number.update(interactive=True), - gr.Number.update(interactive=True), - gr.Button.update(interactive=True), - gr.Button.update(interactive=True), - gr.Checkbox.update(interactive=True), - # gr.Number.update(interactive=True), - gr.Number.update(interactive=True), - ) - else: - - # Transform the points into torch tensors - for key_point, point in global_state["points"].items(): - try: - p_start = point.get("start_temp", point["start"]) - p_end = point["target"] - - if p_start is None or p_end is None: - continue - - except KeyError: - continue - - p_in_pixels.append(p_start) - t_in_pixels.append(p_end) - valid_points.append(key_point) - - mask = torch.tensor(global_state['mask']).float() - drag_mask = 1 - mask - - renderer: Renderer = global_state["renderer"] - global_state['temporal_params']['stop'] = False - global_state['editing_state'] = 'running' - - # reverse points order - p_to_opt = reverse_point_pairs(p_in_pixels) - t_to_opt = reverse_point_pairs(t_in_pixels) - print('Running with:') - print(f' Source: {p_in_pixels}') - print(f' Target: {t_in_pixels}') - step_idx = 0 - last_time = time.time() - while True: - print_memory_usage() - # add a TIMEOUT break - print(f'Running time: {time.time() - last_time}') - if IS_SPACE and time.time() - last_time > TIMEOUT: - print('Timeout break!') - break - if global_state["temporal_params"]["stop"] or global_state['generator_params']["stop"]: - break - - # do drage here! - renderer._render_drag_impl( - global_state['generator_params'], - p_to_opt, # point - t_to_opt, # target - drag_mask, # mask, - global_state['params']['motion_lambda'], # lambda_mask - reg=0, - feature_idx=5, # NOTE: do not support change for now - r1=global_state['params']['r1_in_pixels'], # r1 - r2=global_state['params']['r2_in_pixels'], # r2 - # random_seed = 0, - # noise_mode = 'const', - trunc_psi=global_state['params']['trunc_psi'], - # force_fp32 = False, - # layer_name = None, - # sel_channels = 3, - # base_channel = 0, - # img_scale_db = 0, - # img_normalize = False, - # untransform = False, - is_drag=True, - to_pil=True) - - if step_idx % global_state['draw_interval'] == 0: - print('Current Source:') - for key_point, p_i, t_i in zip(valid_points, p_to_opt, - t_to_opt): - global_state["points"][key_point]["start_temp"] = [ - p_i[1], - p_i[0], - ] - global_state["points"][key_point]["target"] = [ - t_i[1], - t_i[0], - ] - start_temp = global_state["points"][key_point][ - "start_temp"] - print(f' {start_temp}') - - image_result = global_state['generator_params']['image'] - image_draw = update_image_draw( - image_result, - global_state['points'], - global_state['mask'], - global_state['show_mask'], - global_state, - ) - global_state['images']['image_raw'] = image_result - - yield ( - global_state, - step_idx, - global_state['images']['image_show'], - # gr.File.update(visible=False), - gr.Button.update(interactive=False), - gr.Button.update(interactive=False), - gr.Button.update(interactive=False), - gr.Button.update(interactive=False), - gr.Button.update(interactive=False), - # latent space - gr.Radio.update(interactive=False), - gr.Button.update(interactive=False), - # enable stop button in loop - gr.Button.update(interactive=True), - - # update other comps - gr.Dropdown.update(interactive=False), - gr.Number.update(interactive=False), - 
gr.Number.update(interactive=False), - gr.Button.update(interactive=False), - gr.Button.update(interactive=False), - gr.Checkbox.update(interactive=False), - # gr.Number.update(interactive=False), - gr.Number.update(interactive=False), - ) - - # increate step - step_idx += 1 - - image_result = global_state['generator_params']['image'] - global_state['images']['image_raw'] = image_result - image_draw = update_image_draw(image_result, - global_state['points'], - global_state['mask'], - global_state['show_mask'], - global_state) - - # fp = NamedTemporaryFile(suffix=".png", delete=False) - # image_result.save(fp, "PNG") - - global_state['editing_state'] = 'add_points' - - yield ( - global_state, - 0, # reset step to 0 after stop. - global_state['images']['image_show'], - # gr.File.update(visible=True, value=fp.name), - gr.Button.update(interactive=True), - gr.Button.update(interactive=True), - gr.Button.update(interactive=True), - gr.Button.update(interactive=True), - gr.Button.update(interactive=True), - # latent space - gr.Radio.update(interactive=True), - gr.Button.update(interactive=True), - # NOTE: disable stop button with loop finish - gr.Button.update(interactive=False), - - # update other comps - gr.Dropdown.update(interactive=True), - gr.Number.update(interactive=True), - gr.Number.update(interactive=True), - gr.Checkbox.update(interactive=True), - gr.Number.update(interactive=True), - ) - - form_start_btn.click( - on_click_start, - inputs=[global_state, form_image], - outputs=[ - global_state, - form_steps_number, - form_image, - # form_download_result_file, - # >>> buttons - form_reset_image, - enable_add_points, - enable_add_mask, - undo_points, - form_reset_mask_btn, - form_latent_space, - form_start_btn, - form_stop_btn, - # <<< buttonm - # >>> inputs comps - form_pretrained_dropdown, - form_seed_number, - form_lr_number, - show_mask, - form_lambda_number, - ], - ) - - def on_click_stop(global_state): - """Function to handle stop button is clicked. - 1. send a stop signal by set global_state["temporal_params"]["stop"] as True - 2. Disable Stop button - """ - global_state["temporal_params"]["stop"] = True - - return global_state, gr.Button.update(interactive=False) - - form_stop_btn.click(on_click_stop, - inputs=[global_state], - outputs=[global_state, form_stop_btn], - queue=False) - - form_draw_interval_number.change( - partial( - on_change_single_global_state, - "draw_interval", - map_transform=lambda x: int(x), - ), - inputs=[form_draw_interval_number, global_state], - outputs=[global_state], - queue=False, - ) - - def on_click_remove_point(global_state): - choice = global_state["curr_point"] - del global_state["points"][choice] - - choices = list(global_state["points"].keys()) - - if len(choices) > 0: - global_state["curr_point"] = choices[0] - - return ( - gr.Dropdown.update(choices=choices, value=choices[0]), - global_state, - ) - - # Mask - def on_click_reset_mask(global_state): - global_state['mask'] = np.ones( - ( - global_state["images"]["image_raw"].size[1], - global_state["images"]["image_raw"].size[0], - ), - dtype=np.uint8, - ) - image_draw = update_image_draw(global_state['images']['image_raw'], - global_state['points'], - global_state['mask'], - global_state['show_mask'], global_state) - return global_state, image_draw - - form_reset_mask_btn.click( - on_click_reset_mask, - inputs=[global_state], - outputs=[global_state, form_image], - ) - - # Image - def on_click_enable_draw(global_state, image): - """Function to start add mask mode. - 1. 
Preprocess mask info from last state - 2. Change editing state to add_mask - 3. Set curr image with points and mask - """ - global_state = preprocess_mask_info(global_state, image) - global_state['editing_state'] = 'add_mask' - image_raw = global_state['images']['image_raw'] - image_draw = update_image_draw(image_raw, global_state['points'], - global_state['mask'], True, - global_state) - return (global_state, - gr.Image.update(value=image_draw, interactive=True)) - - def on_click_remove_draw(global_state, image): - """Function to start remove mask mode. - 1. Preprocess mask info from last state - 2. Change editing state to remove_mask - 3. Set curr image with points and mask - """ - global_state = preprocess_mask_info(global_state, image) - global_state['edinting_state'] = 'remove_mask' - image_raw = global_state['images']['image_raw'] - image_draw = update_image_draw(image_raw, global_state['points'], - global_state['mask'], True, - global_state) - return (global_state, - gr.Image.update(value=image_draw, interactive=True)) - - enable_add_mask.click(on_click_enable_draw, - inputs=[global_state, form_image], - outputs=[ - global_state, - form_image, - ], - queue=False) - - def on_click_add_point(global_state, image: dict): - """Function switch from add mask mode to add points mode. - 1. Updaste mask buffer if need - 2. Change global_state['editing_state'] to 'add_points' - 3. Set current image with mask - """ - - global_state = preprocess_mask_info(global_state, image) - global_state['editing_state'] = 'add_points' - mask = global_state['mask'] - image_raw = global_state['images']['image_raw'] - image_draw = update_image_draw(image_raw, global_state['points'], mask, - global_state['show_mask'], global_state) - - return (global_state, - gr.Image.update(value=image_draw, interactive=False)) - - enable_add_points.click(on_click_add_point, - inputs=[global_state, form_image], - outputs=[global_state, form_image], - queue=False) - - def on_click_image(global_state, evt: gr.SelectData): - """This function only support click for point selection - """ - xy = evt.index - if global_state['editing_state'] != 'add_points': - print(f'In {global_state["editing_state"]} state. ' - 'Do not add points.') - - return global_state, global_state['images']['image_show'] - - points = global_state["points"] - - point_idx = get_latest_points_pair(points) - if point_idx is None: - points[0] = {'start': xy, 'target': None} - print(f'Click Image - Start - {xy}') - elif points[point_idx].get('target', None) is None: - points[point_idx]['target'] = xy - print(f'Click Image - Target - {xy}') - else: - points[point_idx + 1] = {'start': xy, 'target': None} - print(f'Click Image - Start - {xy}') - - image_raw = global_state['images']['image_raw'] - image_draw = update_image_draw( - image_raw, - global_state['points'], - global_state['mask'], - global_state['show_mask'], - global_state, - ) - - return global_state, image_draw - - form_image.select( - on_click_image, - inputs=[global_state], - outputs=[global_state, form_image], - queue=False, - ) - - def on_click_clear_points(global_state): - """Function to handle clear all control points - 1. clear global_state['points'] (clear_state) - 2. re-init network - 2. 
re-draw image - """ - clear_state(global_state, target='point') - - renderer: Renderer = global_state["renderer"] - renderer.feat_refs = None - - image_raw = global_state['images']['image_raw'] - image_draw = update_image_draw(image_raw, {}, global_state['mask'], - global_state['show_mask'], global_state) - return global_state, image_draw - - undo_points.click(on_click_clear_points, - inputs=[global_state], - outputs=[global_state, form_image], - queue=False) - - def on_click_show_mask(global_state, show_mask): - """Function to control whether show mask on image.""" - global_state['show_mask'] = show_mask - - image_raw = global_state['images']['image_raw'] - image_draw = update_image_draw( - image_raw, - global_state['points'], - global_state['mask'], - global_state['show_mask'], - global_state, - ) - return global_state, image_draw - - show_mask.change( - on_click_show_mask, - inputs=[global_state, show_mask], - outputs=[global_state, form_image], - queue=False, - ) - -gr.close_all() -app.queue(concurrency_count=1, max_size=200, api_open=False) -app.launch(show_api=False) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/audio_diffusion.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/audio_diffusion.md deleted file mode 100644 index cc52c70a8e9ec6814d9d2b928c70d0694a3b9e71..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/audio_diffusion.md +++ /dev/null @@ -1,37 +0,0 @@ - - -# Audio Diffusion - -[Audio Diffusion](https://github.com/teticio/audio-diffusion) is by Robert Dargavel Smith, and it leverages the recent advances in image generation from diffusion models by converting audio samples to and from Mel spectrogram images. - -The original codebase, training scripts and example notebooks can be found at [teticio/audio-diffusion](https://github.com/teticio/audio-diffusion). - - - -Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## AudioDiffusionPipeline -[[autodoc]] AudioDiffusionPipeline - - all - - __call__ - -## AudioPipelineOutput -[[autodoc]] pipelines.AudioPipelineOutput - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput - -## Mel -[[autodoc]] Mel diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/overview.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/overview.md deleted file mode 100644 index 2467b143d5dc86f9d044bd937ccd39d93779129b..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/overview.md +++ /dev/null @@ -1,36 +0,0 @@ - - -# Pipelines - -Pipelines provide a simple way to run state-of-the-art diffusion models in inference by bundling all of the necessary components (multiple independently-trained models, schedulers, and processors) into a single end-to-end class. Pipelines are flexible and they can be adapted to use different scheduler or even model components. - -All pipelines are built from the base [`DiffusionPipeline`] class which provides basic functionality for loading, downloading, and saving all the components. 
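-
-As a minimal sketch of that shared interface (the checkpoint id below is only an illustrative example), loading and saving a full pipeline looks like this:
-
-```py
-from diffusers import DiffusionPipeline
-
-# Download (or load from the local cache) every component of the pipeline in one call.
-pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
-
-# Write all components back to disk so the pipeline can be reloaded later.
-pipeline.save_pretrained("./my-pipeline")
-```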
- - - -Pipelines do not offer any training functionality. You'll notice PyTorch's autograd is disabled by decorating the [`~DiffusionPipeline.__call__`] method with a [`torch.no_grad`](https://pytorch.org/docs/stable/generated/torch.no_grad.html) decorator because pipelines should not be used for training. If you're interested in training, please take a look at the [Training](../traininig/overview) guides instead! - - - -## DiffusionPipeline - -[[autodoc]] DiffusionPipeline - - all - - __call__ - - device - - to - - components - -## FlaxDiffusionPipeline - -[[autodoc]] pipelines.pipeline_flax_utils.FlaxDiffusionPipeline diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/overview.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/overview.md deleted file mode 100644 index 82b2597a7043294cff1e235614be612ed4d35d0b..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/overview.md +++ /dev/null @@ -1,180 +0,0 @@ - - -# Stable Diffusion pipelines - -Stable Diffusion is a text-to-image latent diffusion model created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/) and [LAION](https://laion.ai/). Latent diffusion applies the diffusion process over a lower dimensional latent space to reduce memory and compute complexity. This specific type of diffusion model was proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer. - -Stable Diffusion is trained on 512x512 images from a subset of the LAION-5B dataset. This model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and can run on consumer GPUs. - -For more details about how Stable Diffusion works and how it differs from the base latent diffusion model, take a look at the Stability AI [announcement](https://stability.ai/blog/stable-diffusion-announcement) and our own [blog post](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) for more technical details. - -You can find the original codebase for Stable Diffusion v1.0 at [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion) and Stable Diffusion v2.0 at [Stability-AI/stablediffusion](https://github.com/Stability-AI/stablediffusion) as well as their original scripts for various tasks. Additional official checkpoints for the different Stable Diffusion versions and tasks can be found on the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations. Explore these organizations to find the best checkpoint for your use-case! - -The table below summarizes the available Stable Diffusion pipelines, their supported tasks, and an interactive demo: - -
-| Pipeline | Supported tasks |
-|---|---|
-| StableDiffusion | text-to-image |
-| StableDiffusionImg2Img | image-to-image |
-| StableDiffusionInpaint | inpainting |
-| StableDiffusionDepth2Img | depth-to-image |
-| StableDiffusionImageVariation | image variation |
-| StableDiffusionPipelineSafe | filtered text-to-image |
-| StableDiffusion2 | text-to-image, inpainting, depth-to-image, super-resolution |
-| StableDiffusionXL | text-to-image, image-to-image |
-| StableDiffusionLatentUpscale | super-resolution |
-| StableDiffusionUpscale | super-resolution |
-| StableDiffusionLDM3D | text-to-rgb, text-to-depth |
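-
-A minimal text-to-image sketch looks like the following (the prompt is only an illustrative example; the checkpoint is the same one used in the tips below):
-
-```py
-from diffusers import StableDiffusionPipeline
-
-pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
-pipeline = pipeline.to("cuda")  # assumes a CUDA GPU is available
-
-# Generate an image from a text prompt and save it to disk.
-image = pipeline("a photograph of an astronaut riding a horse").images[0]
-image.save("astronaut.png")
-```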
    - -## Tips - -To help you get the most out of the Stable Diffusion pipelines, here are a few tips for improving performance and usability. These tips are applicable to all Stable Diffusion pipelines. - -### Explore tradeoff between speed and quality - -[`StableDiffusionPipeline`] uses the [`PNDMScheduler`] by default, but 🤗 Diffusers provides many other schedulers (some of which are faster or output better quality) that are compatible. For example, if you want to use the [`EulerDiscreteScheduler`] instead of the default: - -```py -from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler - -pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") -pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) - -# or -euler_scheduler = EulerDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") -pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=euler_scheduler) -``` - -### Reuse pipeline components to save memory - -To save memory and use the same components across multiple pipelines, use the `.components` method to avoid loading weights into RAM more than once. - -```py -from diffusers import ( - StableDiffusionPipeline, - StableDiffusionImg2ImgPipeline, - StableDiffusionInpaintPipeline, -) - -text2img = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") -img2img = StableDiffusionImg2ImgPipeline(**text2img.components) -inpaint = StableDiffusionInpaintPipeline(**text2img.components) - -# now you can use text2img(...), img2img(...), inpaint(...) just like the call methods of each respective pipeline -``` \ No newline at end of file diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/collate.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/collate.py deleted file mode 100644 index ad749197df21b0d74297548be5f66a696adebf7f..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/collate.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections.abc import Mapping, Sequence - -import torch -import torch.nn.functional as F -from torch.utils.data.dataloader import default_collate - -from .data_container import DataContainer - - -def collate(batch, samples_per_gpu=1): - """Puts each data field into a tensor/DataContainer with outer dimension - batch size. - - Extend default_collate to add support for - :type:`~mmcv.parallel.DataContainer`. There are 3 cases. - - 1. cpu_only = True, e.g., meta data - 2. cpu_only = False, stack = True, e.g., images tensors - 3. 
cpu_only = False, stack = False, e.g., gt bboxes - """ - - if not isinstance(batch, Sequence): - raise TypeError(f'{batch.dtype} is not supported.') - - if isinstance(batch[0], DataContainer): - stacked = [] - if batch[0].cpu_only: - for i in range(0, len(batch), samples_per_gpu): - stacked.append( - [sample.data for sample in batch[i:i + samples_per_gpu]]) - return DataContainer( - stacked, batch[0].stack, batch[0].padding_value, cpu_only=True) - elif batch[0].stack: - for i in range(0, len(batch), samples_per_gpu): - assert isinstance(batch[i].data, torch.Tensor) - - if batch[i].pad_dims is not None: - ndim = batch[i].dim() - assert ndim > batch[i].pad_dims - max_shape = [0 for _ in range(batch[i].pad_dims)] - for dim in range(1, batch[i].pad_dims + 1): - max_shape[dim - 1] = batch[i].size(-dim) - for sample in batch[i:i + samples_per_gpu]: - for dim in range(0, ndim - batch[i].pad_dims): - assert batch[i].size(dim) == sample.size(dim) - for dim in range(1, batch[i].pad_dims + 1): - max_shape[dim - 1] = max(max_shape[dim - 1], - sample.size(-dim)) - padded_samples = [] - for sample in batch[i:i + samples_per_gpu]: - pad = [0 for _ in range(batch[i].pad_dims * 2)] - for dim in range(1, batch[i].pad_dims + 1): - pad[2 * dim - - 1] = max_shape[dim - 1] - sample.size(-dim) - padded_samples.append( - F.pad( - sample.data, pad, value=sample.padding_value)) - stacked.append(default_collate(padded_samples)) - elif batch[i].pad_dims is None: - stacked.append( - default_collate([ - sample.data - for sample in batch[i:i + samples_per_gpu] - ])) - else: - raise ValueError( - 'pad_dims should be either None or integers (1-3)') - - else: - for i in range(0, len(batch), samples_per_gpu): - stacked.append( - [sample.data for sample in batch[i:i + samples_per_gpu]]) - return DataContainer(stacked, batch[0].stack, batch[0].padding_value) - elif isinstance(batch[0], Sequence): - transposed = zip(*batch) - return [collate(samples, samples_per_gpu) for samples in transposed] - elif isinstance(batch[0], Mapping): - return { - key: collate([d[key] for d in batch], samples_per_gpu) - for key in batch[0] - } - else: - return default_collate(batch) diff --git a/spaces/AriaMei/TTSdemo/utils.py b/spaces/AriaMei/TTSdemo/utils.py deleted file mode 100644 index 9c52df8d2525d8eca67427ba90d09513b395ce46..0000000000000000000000000000000000000000 --- a/spaces/AriaMei/TTSdemo/utils.py +++ /dev/null @@ -1,267 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - print("%s is not in the checkpoint" % k) or logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - 
model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - print("Loaded checkpoint '{}' (iteration {}) " .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - ckptname = checkpoint_path.split("/")[-1] - newest_step = int(ckptname.split(".")[0].split("_")[1]) - last_ckptname = checkpoint_path.replace(str(newest_step), str(newest_step-1200)) - if newest_step >= 1200: - os.system(f"rm {last_ckptname}") - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - print("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - 
filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/_compat.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/_compat.py deleted file mode 100644 index 593bff23edecd3c517c96e119ee777bd4ee1d9d0..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/_compat.py +++ /dev/null @@ -1,55 +0,0 @@ -import importlib.metadata -from typing import Any, Optional, Protocol, cast - - -class BadMetadata(ValueError): - def __init__(self, dist: importlib.metadata.Distribution, *, reason: str) -> None: - self.dist = dist - self.reason = reason - - def __str__(self) -> str: - return f"Bad metadata in {self.dist} ({self.reason})" - - -class BasePath(Protocol): - """A protocol that various path objects conform. - - This exists because importlib.metadata uses both ``pathlib.Path`` and - ``zipfile.Path``, and we need a common base for type hints (Union does not - work well since ``zipfile.Path`` is too new for our linter setup). - - This does not mean to be exhaustive, but only contains things that present - in both classes *that we need*. - """ - - @property - def name(self) -> str: - raise NotImplementedError() - - @property - def parent(self) -> "BasePath": - raise NotImplementedError() - - -def get_info_location(d: importlib.metadata.Distribution) -> Optional[BasePath]: - """Find the path to the distribution's metadata directory. - - HACK: This relies on importlib.metadata's private ``_path`` attribute. Not - all distributions exist on disk, so importlib.metadata is correct to not - expose the attribute as public. But pip's code base is old and not as clean, - so we do this to avoid having to rewrite too many things. Hopefully we can - eliminate this some day. - """ - return getattr(d, "_path", None) - - -def get_dist_name(dist: importlib.metadata.Distribution) -> str: - """Get the distribution's project name. - - The ``name`` attribute is only available in Python 3.10 or later. We are - targeting exactly that, but Mypy does not know this. 
- """ - name = cast(Any, dist).name - if not isinstance(name, str): - raise BadMetadata(dist, reason="invalid metadata entry 'name'") - return name diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_types.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_types.py deleted file mode 100644 index d949412e03b29d70592c7721fe747e5085c2e280..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_types.py +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. - -from typing import Any, Callable, Tuple - -# Type annotations -ParseFloat = Callable[[str], Any] -Key = Tuple[str, ...] -Pos = int diff --git a/spaces/BIOML-SVM/SVM/msa.py b/spaces/BIOML-SVM/SVM/msa.py deleted file mode 100644 index f06f83c1ba8db5eae4f03bbf6c15f07be4d7aebb..0000000000000000000000000000000000000000 --- a/spaces/BIOML-SVM/SVM/msa.py +++ /dev/null @@ -1,62 +0,0 @@ -import glob -import itertools -from pathlib import Path -from typing import List, Tuple, Optional, Dict, NamedTuple, Union, Callable -import string - -import numpy as np -import torch -from scipy.spatial.distance import squareform, pdist, cdist -from Bio import SeqIO -#import biotite.structure as bs -#from biotite.structure.io.pdbx import PDBxFile, get_structure -#from biotite.database import rcsb -from tqdm import tqdm -import pandas as pd - - -# This is an efficient way to delete lowercase characters and insertion characters from a string -deletekeys = dict.fromkeys(string.ascii_lowercase) -deletekeys["."] = None -deletekeys["*"] = None -translation = str.maketrans(deletekeys) - - -def read_sequence(filename: str) -> Tuple[str, str]: - """ Reads the first (reference) sequences from a fasta or MSA file.""" - record = next(SeqIO.parse(filename, "fasta")) - return record.description, str(record.seq) - -def remove_insertions(sequence: str) -> str: - """ Removes any insertions into the sequence. Needed to load aligned sequences in an MSA. 
""" - return sequence.translate(translation) - -def read_msa(filename: str) -> List[Tuple[str, str]]: - """ Reads the sequences from an MSA file, automatically removes insertions.""" - return [(record.description, remove_insertions(str(record.seq))) for record in SeqIO.parse(filename, "fasta")] - - -def greedy_select(msa: List[Tuple[str, str]], num_seqs: int, mode: str = "max") -> List[Tuple[str, str]]: - """ - Select sequences from the MSA to maximize the hamming distance - Alternatively, can use hhfilter - """ - assert mode in ("max", "min") - if len(msa) <= num_seqs: - return msa - - array = np.array([list(seq) for _, seq in msa], dtype=np.bytes_).view(np.uint8) - - optfunc = np.argmax if mode == "max" else np.argmin - all_indices = np.arange(len(msa)) - indices = [0] - pairwise_distances = np.zeros((0, len(msa))) - for _ in range(num_seqs - 1): - dist = cdist(array[indices[-1:]], array, "hamming") - pairwise_distances = np.concatenate([pairwise_distances, dist]) - shifted_distance = np.delete(pairwise_distances, indices, axis=1).mean(0) - shifted_index = optfunc(shifted_distance) - index = np.delete(all_indices, indices)[shifted_index] - indices.append(index) - indices = sorted(indices) - return [msa[idx] for idx in indices] \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Ataque Areo Comando Mod Apk.md b/spaces/Benson/text-generation/Examples/Ataque Areo Comando Mod Apk.md deleted file mode 100644 index b59055257ba8d5a9e5e2b2682791ed21fe82e22d..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Ataque Areo Comando Mod Apk.md +++ /dev/null @@ -1,92 +0,0 @@ -
    -

    Air Offense Command Mod APK: An Exciting Arcade Game for Android

    -

    If you are looking for a fun and challenging arcade game that will test your skills and reflexes, you may want to try Air Offense Command Mod APK. This is a modified version of the original Air Offense Command game, which is available on the Google Play Store. In this game, you lead a ragtag bomber force to save your country from annihilation by launching a desperate preemptive strike against the enemy. You will have to upgrade your planes with better bombs, rockets, armor, and engines, and dodge enemy fire, missiles, and fighters. Are you ready to take on this mission? Here is everything you need to know about Air Offense Command Mod APK.

    -

    air offense command mod apk


    DOWNLOAD ››› https://bltlly.com/2v6IP7



    -

    What is Air Offense Command?

    -

    Air Offense Command is an arcade game developed by Ensit Media, an indie game studio from South Korea. The game was released in April 2022 and has received more than 50,000 downloads and positive reviews from players. It features retro-style graphics, simple controls, and addictive gameplay, and is inspired by classic arcade games such as 1942, Raiden, and Sky Force.

    -

    How do you play Air Offense Command?

    -

    The gameplay of Air Offense Command is simple but challenging. You control a bomber plane that automatically flies from left to right. You can tap the screen to drop bombs or swipe to launch rockets, and tilt the device to move the plane up and down. Your goal is to destroy as many enemy targets as possible while avoiding their attacks. You will face different types of enemies, such as tanks, trucks, ships, cannons, missiles, and fighters. You will also encounter boss battles that require more strategy and skill.

    -

    How do you upgrade your plane in Air Offense Command?

    - -

    What is Air Offense Command Mod APK?

    -

    Air Offense Command Mod APK is a modified version of the original game that offers some advantages and features that are not available in the official version. Some of the benefits of using Air Offense Command Mod APK are:

    -

    Unlimited coins

    -

    With Air Offense Command Mod APK, you will have unlimited coins that you can use to upgrade your plane without limitations. You can max out all the upgrades and unlock all the planes without spending real money.

    -

    No ads

    -

    With Air Offense Command Mod APK, you will not see any ads that could interrupt your game or annoy you. You can enjoy the game without distractions or interruptions.

    -

    Easy installation

    -

    With Air Offense Command Mod APK, you do not need to root your device or install additional apps or files. You only need to download the APK file from a reliable source and install it on your device like any other app.

    -

    How do you download and install Air Offense Command Mod APK?

    -

    If you want to download and install Air Offense Command Mod APK on your Android device, you can follow these simple steps:

    -

    -
      -
    1. Go to a trusted website that offers Air Offense Command Mod APK for download. For example, you can use [APKCombo]( 1 ), which is a safe and fast APK downloader.
    2. -
    3. Search for Air Offense Command Mod APK on the website and click the download button.
    4. -
    5. Wait for the download to finish and locate the APK file on your device.
    6. -
    7. Before installing the APK file, make sure you have enabled the option to install apps from unknown sources in your device settings.
    8. -
    9. Tap the APK file and follow the instructions to install it on your device.
    10. -
    11. Launch the game and enjoy playing Air Offense Command Mod APK.
    12. -
    -

    A comparison table of Air Offense Command vs Air Offense Command Mod APK

    | Feature | Air Offense Command | Air Offense Command Mod APK |
    | --- | --- | --- |
    | Coins | Limited, earned by playing the game or bought with real money | Unlimited, available for free |
    | Ads | Yes, shown during gameplay or before starting a level | No, removed completely |
    | Installation | Easy, available on the Google Play Store | Easy, available on APKCombo or other websites |
    | Updates | Yes, automatic or manual through the Google Play Store | No, manual by downloading and installing a new APK file |
    | Security | Yes, verified by Google Play Protect | No, not verified by Google Play Protect, may contain malware or viruses |
    | Support | Yes, provided by the developer via email or social media | No, not provided by the developer, you may encounter bugs or technical issues |
    | Compatibility | Yes, compatible with most Android devices running Android 4.4 or higher | No, not compatible with some devices or Android versions, may crash or not work properly |

    Conclusion

    - -

    Frequently Asked Questions (FAQ)

    -

    Q: Is Air Offense Command Mod APK free?

    -

    A: Yes, Air Offense Command Mod APK is free to download and play. You do not need to pay any money to use it.

    -

    Q: Is Air Offense Command Mod APK legal?

    -

    A: No, Air Offense Command Mod APK is not legal. It is a modified version of the original game that violates the terms and conditions of the developer and the Google Play Store. You may face legal consequences if you use it.

    -

    Q: Is Air Offense Command Mod APK safe?

    -

    A: No, Air Offense Command Mod APK is not safe. It is not verified by Google Play Protect and may contain malware or viruses that can damage your device or data. You should always scan it for threats before using it.

    -

    Q: How do I uninstall Air Offense Command Mod APK?

    -

    A: To uninstall Air Offense Command Mod APK from your device, you can follow these steps:

    -
      -
    1. Go to your device settings and tap Apps or Applications.
    2. -
    3. Find and tap Air Offense Command Mod APK in the list of apps.
    4. -
    5. Tap Uninstall and confirm your action.
    6. -
    7. Wait for the uninstallation process to finish and restart your device.
    8. -

      Q: How do I contact the developer of Air Offense Command?

      -

      A: If you have any questions or feedback about the original version of Air Offense Command, you can contact the developer by email at ensitmedia@gmail.com or through their Facebook page at https://www.facebook.com/ensitmedia/ They will be happy to hear from you.

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/jupyter.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/jupyter.py deleted file mode 100644 index 22f4d716ac9764ee18005b9b852946d614152375..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/jupyter.py +++ /dev/null @@ -1,101 +0,0 @@ -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Sequence - -if TYPE_CHECKING: - from pip._vendor.rich.console import ConsoleRenderable - -from . import get_console -from .segment import Segment -from .terminal_theme import DEFAULT_TERMINAL_THEME - -if TYPE_CHECKING: - from pip._vendor.rich.console import ConsoleRenderable - -JUPYTER_HTML_FORMAT = """\ -
      {code}
      -""" - - -class JupyterRenderable: - """A shim to write html to Jupyter notebook.""" - - def __init__(self, html: str, text: str) -> None: - self.html = html - self.text = text - - def _repr_mimebundle_( - self, include: Sequence[str], exclude: Sequence[str], **kwargs: Any - ) -> Dict[str, str]: - data = {"text/plain": self.text, "text/html": self.html} - if include: - data = {k: v for (k, v) in data.items() if k in include} - if exclude: - data = {k: v for (k, v) in data.items() if k not in exclude} - return data - - -class JupyterMixin: - """Add to an Rich renderable to make it render in Jupyter notebook.""" - - __slots__ = () - - def _repr_mimebundle_( - self: "ConsoleRenderable", - include: Sequence[str], - exclude: Sequence[str], - **kwargs: Any, - ) -> Dict[str, str]: - console = get_console() - segments = list(console.render(self, console.options)) - html = _render_segments(segments) - text = console._render_buffer(segments) - data = {"text/plain": text, "text/html": html} - if include: - data = {k: v for (k, v) in data.items() if k in include} - if exclude: - data = {k: v for (k, v) in data.items() if k not in exclude} - return data - - -def _render_segments(segments: Iterable[Segment]) -> str: - def escape(text: str) -> str: - """Escape html.""" - return text.replace("&", "&").replace("<", "<").replace(">", ">") - - fragments: List[str] = [] - append_fragment = fragments.append - theme = DEFAULT_TERMINAL_THEME - for text, style, control in Segment.simplify(segments): - if control: - continue - text = escape(text) - if style: - rule = style.get_html_style(theme) - text = f'{text}' if rule else text - if style.link: - text = f'{text}' - append_fragment(text) - - code = "".join(fragments) - html = JUPYTER_HTML_FORMAT.format(code=code) - - return html - - -def display(segments: Iterable[Segment], text: str) -> None: - """Render segments to Jupyter.""" - html = _render_segments(segments) - jupyter_renderable = JupyterRenderable(html, text) - try: - from IPython.display import display as ipython_display - - ipython_display(jupyter_renderable) - except ModuleNotFoundError: - # Handle the case where the Console has force_jupyter=True, - # but IPython is not installed. 
- pass - - -def print(*args: Any, **kwargs: Any) -> None: - """Proxy for Console print.""" - console = get_console() - return console.print(*args, **kwargs) diff --git a/spaces/Billyosoro/ESRGAN/tests/test_model.py b/spaces/Billyosoro/ESRGAN/tests/test_model.py deleted file mode 100644 index c20bb1d56ed20222e929e9c94026f6ea383c6026..0000000000000000000000000000000000000000 --- a/spaces/Billyosoro/ESRGAN/tests/test_model.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -import yaml -from basicsr.archs.rrdbnet_arch import RRDBNet -from basicsr.data.paired_image_dataset import PairedImageDataset -from basicsr.losses.losses import GANLoss, L1Loss, PerceptualLoss - -from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN -from realesrgan.models.realesrgan_model import RealESRGANModel -from realesrgan.models.realesrnet_model import RealESRNetModel - - -def test_realesrnet_model(): - with open('tests/data/test_realesrnet_model.yml', mode='r') as f: - opt = yaml.load(f, Loader=yaml.FullLoader) - - # build model - model = RealESRNetModel(opt) - # test attributes - assert model.__class__.__name__ == 'RealESRNetModel' - assert isinstance(model.net_g, RRDBNet) - assert isinstance(model.cri_pix, L1Loss) - assert isinstance(model.optimizers[0], torch.optim.Adam) - - # prepare data - gt = torch.rand((1, 3, 32, 32), dtype=torch.float32) - kernel1 = torch.rand((1, 5, 5), dtype=torch.float32) - kernel2 = torch.rand((1, 5, 5), dtype=torch.float32) - sinc_kernel = torch.rand((1, 5, 5), dtype=torch.float32) - data = dict(gt=gt, kernel1=kernel1, kernel2=kernel2, sinc_kernel=sinc_kernel) - model.feed_data(data) - # check dequeue - model.feed_data(data) - # check data shape - assert model.lq.shape == (1, 3, 8, 8) - assert model.gt.shape == (1, 3, 32, 32) - - # change probability to test if-else - model.opt['gaussian_noise_prob'] = 0 - model.opt['gray_noise_prob'] = 0 - model.opt['second_blur_prob'] = 0 - model.opt['gaussian_noise_prob2'] = 0 - model.opt['gray_noise_prob2'] = 0 - model.feed_data(data) - # check data shape - assert model.lq.shape == (1, 3, 8, 8) - assert model.gt.shape == (1, 3, 32, 32) - - # ----------------- test nondist_validation -------------------- # - # construct dataloader - dataset_opt = dict( - name='Demo', - dataroot_gt='tests/data/gt', - dataroot_lq='tests/data/lq', - io_backend=dict(type='disk'), - scale=4, - phase='val') - dataset = PairedImageDataset(dataset_opt) - dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0) - assert model.is_train is True - model.nondist_validation(dataloader, 1, None, False) - assert model.is_train is True - - -def test_realesrgan_model(): - with open('tests/data/test_realesrgan_model.yml', mode='r') as f: - opt = yaml.load(f, Loader=yaml.FullLoader) - - # build model - model = RealESRGANModel(opt) - # test attributes - assert model.__class__.__name__ == 'RealESRGANModel' - assert isinstance(model.net_g, RRDBNet) # generator - assert isinstance(model.net_d, UNetDiscriminatorSN) # discriminator - assert isinstance(model.cri_pix, L1Loss) - assert isinstance(model.cri_perceptual, PerceptualLoss) - assert isinstance(model.cri_gan, GANLoss) - assert isinstance(model.optimizers[0], torch.optim.Adam) - assert isinstance(model.optimizers[1], torch.optim.Adam) - - # prepare data - gt = torch.rand((1, 3, 32, 32), dtype=torch.float32) - kernel1 = torch.rand((1, 5, 5), dtype=torch.float32) - kernel2 = torch.rand((1, 5, 5), dtype=torch.float32) - sinc_kernel = torch.rand((1, 5, 5), dtype=torch.float32) - data 
= dict(gt=gt, kernel1=kernel1, kernel2=kernel2, sinc_kernel=sinc_kernel) - model.feed_data(data) - # check dequeue - model.feed_data(data) - # check data shape - assert model.lq.shape == (1, 3, 8, 8) - assert model.gt.shape == (1, 3, 32, 32) - - # change probability to test if-else - model.opt['gaussian_noise_prob'] = 0 - model.opt['gray_noise_prob'] = 0 - model.opt['second_blur_prob'] = 0 - model.opt['gaussian_noise_prob2'] = 0 - model.opt['gray_noise_prob2'] = 0 - model.feed_data(data) - # check data shape - assert model.lq.shape == (1, 3, 8, 8) - assert model.gt.shape == (1, 3, 32, 32) - - # ----------------- test nondist_validation -------------------- # - # construct dataloader - dataset_opt = dict( - name='Demo', - dataroot_gt='tests/data/gt', - dataroot_lq='tests/data/lq', - io_backend=dict(type='disk'), - scale=4, - phase='val') - dataset = PairedImageDataset(dataset_opt) - dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0) - assert model.is_train is True - model.nondist_validation(dataloader, 1, None, False) - assert model.is_train is True - - # ----------------- test optimize_parameters -------------------- # - model.feed_data(data) - model.optimize_parameters(1) - assert model.output.shape == (1, 3, 32, 32) - assert isinstance(model.log_dict, dict) - # check returned keys - expected_keys = ['l_g_pix', 'l_g_percep', 'l_g_gan', 'l_d_real', 'out_d_real', 'l_d_fake', 'out_d_fake'] - assert set(expected_keys).issubset(set(model.log_dict.keys())) diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/datasets.md b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/datasets.md deleted file mode 100644 index 37ed80a6364aabebde99c43c892022d1a1481a16..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/datasets.md +++ /dev/null @@ -1,214 +0,0 @@ -# Use Custom Datasets - -If you want to use a custom dataset while also reusing detectron2's data loaders, -you will need to - -1. Register your dataset (i.e., tell detectron2 how to obtain your dataset). -2. Optionally, register metadata for your dataset. - -Next, we explain the above two concepts in details. - -The [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) -has a working example of how to register and train on a dataset of custom formats. - - -### Register a Dataset - -To let detectron2 know how to obtain a dataset named "my_dataset", you will implement -a function that returns the items in your dataset and then tell detectron2 about this -function: -```python -def get_dicts(): - ... - return list[dict] in the following format - -from detectron2.data import DatasetCatalog -DatasetCatalog.register("my_dataset", get_dicts) -``` - -Here, the snippet associates a dataset "my_dataset" with a function that returns the data. -The registration stays effective until the process exists. - -The function can processes data from its original format into either one of the following: -1. Detectron2's standard dataset dict, described below. This will work with many other builtin - features in detectron2, so it's recommended to use it when it's sufficient for your task. -2. Your custom dataset dict. You can also returns arbitrary dicts in your own format, - such as adding extra keys for new tasks. - Then you will need to handle them properly in the downstream as well. - See below for more details. 
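To make the registration pattern above concrete, here is a minimal sketch of such a function returning dicts in the standard format documented below; the file path, image size, box coordinates, and category id are invented purely for illustration.

```python
import os

from detectron2.data import DatasetCatalog
from detectron2.structures import BoxMode


def get_dicts():
    # One hypothetical image with a single annotated instance.
    return [
        {
            "file_name": os.path.join("path/to/image/dir", "000001.jpg"),  # made-up path
            "image_id": 1,
            "height": 480,
            "width": 640,
            "annotations": [
                {
                    "bbox": [100.0, 120.0, 200.0, 210.0],  # XYXY, absolute pixels
                    "bbox_mode": BoxMode.XYXY_ABS,
                    "category_id": 0,
                },
            ],
        },
    ]


DatasetCatalog.register("my_dataset", get_dicts)
```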
- -#### Standard Dataset Dicts - -For standard tasks -(instance detection, instance/semantic/panoptic segmentation, keypoint detection), -we load the original dataset into `list[dict]` with a specification similar to COCO's json annotations. -This is our standard representation for a dataset. - -Each dict contains information about one image. -The dict may have the following fields. -The fields are often optional, and some functions may be able to -infer certain fields from others if needed, e.g., the data loader -will load the image from "file_name" and load "sem_seg" from "sem_seg_file_name". - -+ `file_name`: the full path to the image file. Will apply rotation and flipping if the image has such exif information. -+ `sem_seg_file_name`: the full path to the ground truth semantic segmentation file. -+ `sem_seg`: semantic segmentation ground truth in a 2D `torch.Tensor`. Values in the array represent - category labels starting from 0. -+ `height`, `width`: integer. The shape of image. -+ `image_id` (str or int): a unique id that identifies this image. Used - during evaluation to identify the images, but a dataset may use it for different purposes. -+ `annotations` (list[dict]): each dict corresponds to annotations of one instance - in this image. Images with empty `annotations` will by default be removed from training, - but can be included using `DATALOADER.FILTER_EMPTY_ANNOTATIONS`. - Each dict may contain the following keys: - + `bbox` (list[float]): list of 4 numbers representing the bounding box of the instance. - + `bbox_mode` (int): the format of bbox. - It must be a member of - [structures.BoxMode](../modules/structures.html#detectron2.structures.BoxMode). - Currently supports: `BoxMode.XYXY_ABS`, `BoxMode.XYWH_ABS`. - + `category_id` (int): an integer in the range [0, num_categories) representing the category label. - The value num_categories is reserved to represent the "background" category, if applicable. - + `segmentation` (list[list[float]] or dict): - + If `list[list[float]]`, it represents a list of polygons, one for each connected component - of the object. Each `list[float]` is one simple polygon in the format of `[x1, y1, ..., xn, yn]`. - The Xs and Ys are either relative coordinates in [0, 1], or absolute coordinates, - depend on whether "bbox_mode" is relative. - + If `dict`, it represents the per-pixel segmentation mask in COCO's RLE format. The dict should have - keys "size" and "counts". You can convert a uint8 segmentation mask of 0s and 1s into - RLE format by `pycocotools.mask.encode(np.asarray(mask, order="F"))`. - + `keypoints` (list[float]): in the format of [x1, y1, v1,..., xn, yn, vn]. - v[i] means the [visibility](http://cocodataset.org/#format-data) of this keypoint. - `n` must be equal to the number of keypoint categories. - The Xs and Ys are either relative coordinates in [0, 1], or absolute coordinates, - depend on whether "bbox_mode" is relative. - - Note that the coordinate annotations in COCO format are integers in range [0, H-1 or W-1]. - By default, detectron2 adds 0.5 to absolute keypoint coordinates to convert them from discrete - pixel indices to floating point coordinates. - + `iscrowd`: 0 or 1. Whether this instance is labeled as COCO's "crowd - region". Don't include this field if you don't know what it means. - -The following keys are used by Fast R-CNN style training, which is rare today. - -+ `proposal_boxes` (array): 2D numpy array with shape (K, 4) representing K precomputed proposal boxes for this image. 
-+ `proposal_objectness_logits` (array): numpy array with shape (K, ), which corresponds to the objectness - logits of proposals in 'proposal_boxes'. -+ `proposal_bbox_mode` (int): the format of the precomputed proposal bbox. - It must be a member of - [structures.BoxMode](../modules/structures.html#detectron2.structures.BoxMode). - Default is `BoxMode.XYXY_ABS`. - - -If your dataset is already a json file in COCO format, you can simply register it by -```python -from detectron2.data.datasets import register_coco_instances -register_coco_instances("my_dataset", {}, "json_annotation.json", "path/to/image/dir") -``` -which will take care of everything (including metadata) for you. - -If your dataset is in COCO format with custom per-instance annotations, -the [load_coco_json](../modules/data.html#detectron2.data.datasets.load_coco_json) function can be used. - -#### Custom Dataset Dicts - -In the `list[dict]` that your dataset function return, the dictionary can also has arbitrary custom data. -This can be useful when you're doing a new task and needs extra information not supported -by the standard dataset dicts. In this case, you need to make sure the downstream code can handle your data -correctly. Usually this requires writing a new `mapper` for the dataloader (see [Use Custom Dataloaders](data_loading.html)) - -When designing your custom format, note that all dicts are stored in memory -(sometimes serialized and with multiple copies). -To save memory, each dict is meant to contain small but sufficient information -about each sample, such as file names and annotations. -Loading full samples typically happens in the data loader. - -For attributes shared among the entire dataset, use `Metadata` (see below). -To avoid exmemory, do not save such information repeatly for each sample. - - -### "Metadata" for Datasets - -Each dataset is associated with some metadata, accessible through -`MetadataCatalog.get(dataset_name).some_metadata`. -Metadata is a key-value mapping that contains information that's shared among -the entire dataset, and usually is used to interpret what's in the dataset, e.g., -names of classes, colors of classes, root of files, etc. -This information will be useful for augmentation, evaluation, visualization, logging, etc. -The structure of metadata depends on the what is needed from the corresponding downstream code. - - -If you register a new dataset through `DatasetCatalog.register`, -you may also want to add its corresponding metadata through -`MetadataCatalog.get(dataset_name).set(name, value)`, to enable any features that need metadata. -You can do it like this (using the metadata field "thing_classes" as an example): - -```python -from detectron2.data import MetadataCatalog -MetadataCatalog.get("my_dataset").thing_classes = ["person", "dog"] -``` - -Here is a list of metadata keys that are used by builtin features in detectron2. -If you add your own dataset without these metadata, some features may be -unavailable to you: - -* `thing_classes` (list[str]): Used by all instance detection/segmentation tasks. - A list of names for each instance/thing category. - If you load a COCO format dataset, it will be automatically set by the function `load_coco_json`. - -* `thing_colors` (list[tuple(r, g, b)]): Pre-defined color (in [0, 255]) for each thing category. - Used for visualization. If not given, random colors are used. - -* `stuff_classes` (list[str]): Used by semantic and panoptic segmentation tasks. - A list of names for each stuff category. 
- -* `stuff_colors` (list[tuple(r, g, b)]): Pre-defined color (in [0, 255]) for each stuff category. - Used for visualization. If not given, random colors are used. - -* `keypoint_names` (list[str]): Used by keypoint localization. A list of names for each keypoint. - -* `keypoint_flip_map` (list[tuple[str]]): Used by the keypoint localization task. A list of pairs of names, - where each pair are the two keypoints that should be flipped if the image is - flipped during augmentation. -* `keypoint_connection_rules`: list[tuple(str, str, (r, g, b))]. Each tuple specifies a pair of keypoints - that are connected and the color to use for the line between them when visualized. - -Some additional metadata that are specific to the evaluation of certain datasets (e.g. COCO): - -* `thing_dataset_id_to_contiguous_id` (dict[int->int]): Used by all instance detection/segmentation tasks in the COCO format. - A mapping from instance class ids in the dataset to contiguous ids in range [0, #class). - Will be automatically set by the function `load_coco_json`. - -* `stuff_dataset_id_to_contiguous_id` (dict[int->int]): Used when generating prediction json files for - semantic/panoptic segmentation. - A mapping from semantic segmentation class ids in the dataset - to contiguous ids in [0, num_categories). It is useful for evaluation only. - -* `json_file`: The COCO annotation json file. Used by COCO evaluation for COCO-format datasets. -* `panoptic_root`, `panoptic_json`: Used by panoptic evaluation. -* `evaluator_type`: Used by the builtin main training script to select - evaluator. No need to use it if you write your own main script. - You can just provide the [DatasetEvaluator](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluator) - for your dataset directly in your main script. - -NOTE: For background on the concept of "thing" and "stuff", see -[On Seeing Stuff: The Perception of Materials by Humans and Machines](http://persci.mit.edu/pub_pdfs/adelson_spie_01.pdf). -In detectron2, the term "thing" is used for instance-level tasks, -and "stuff" is used for semantic segmentation tasks. -Both are used in panoptic segmentation. - - -### Update the Config for New Datasets - -Once you've registered the dataset, you can use the name of the dataset (e.g., "my_dataset" in -example above) in `DATASETS.{TRAIN,TEST}`. -There are other configs you might want to change to train or evaluate on new datasets: - -* `MODEL.ROI_HEADS.NUM_CLASSES` and `MODEL.RETINANET.NUM_CLASSES` are the number of thing classes - for R-CNN and RetinaNet models. -* `MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS` sets the number of keypoints for Keypoint R-CNN. - You'll also need to set [Keypoint OKS](http://cocodataset.org/#keypoints-eval) - with `TEST.KEYPOINT_OKS_SIGMAS` for evaluation. -* `MODEL.SEM_SEG_HEAD.NUM_CLASSES` sets the number of stuff classes for Semantic FPN & Panoptic FPN. -* If you're training Fast R-CNN (with precomputed proposals), `DATASETS.PROPOSAL_FILES_{TRAIN,TEST}` - need to match the datasts. The format of proposal files are documented - [here](../modules/data.html#detectron2.data.load_proposals_into_dataset). 
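As a rough end-to-end sketch of the metadata and config steps described above (the dataset name, class names, and class count are placeholders, and `get_dicts` is the registration function from the earlier example):

```python
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog

# Register the dataset and attach the metadata used by visualization/evaluation.
DatasetCatalog.register("my_dataset", get_dicts)
MetadataCatalog.get("my_dataset").thing_classes = ["person", "dog"]  # placeholder classes

# Point the config at the new dataset and keep the class count consistent.
cfg = get_cfg()
cfg.DATASETS.TRAIN = ("my_dataset",)
cfg.DATASETS.TEST = ()
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
```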
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/dataset_mapper.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/dataset_mapper.py deleted file mode 100644 index 3eadbe15dd1da6566bc51b32630b7e9b4909576b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/dataset_mapper.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -import copy -import torch -from fvcore.common.file_io import PathManager - -from detectron2.data import MetadataCatalog -from detectron2.data import detection_utils as utils -from detectron2.data import transforms as T - -from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData - - -class DatasetMapper: - """ - A customized version of `detectron2.data.DatasetMapper` - """ - - def __init__(self, cfg, is_train=True): - self.tfm_gens = utils.build_transform_gen(cfg, is_train) - - # fmt: off - self.img_format = cfg.INPUT.FORMAT - self.mask_on = cfg.MODEL.MASK_ON - self.keypoint_on = cfg.MODEL.KEYPOINT_ON - self.densepose_on = cfg.MODEL.DENSEPOSE_ON - assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet" - # fmt: on - if self.keypoint_on and is_train: - # Flip only makes sense in training - self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) - else: - self.keypoint_hflip_indices = None - - if self.densepose_on: - densepose_transform_srcs = [ - MetadataCatalog.get(ds).densepose_transform_src - for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST - ] - assert len(densepose_transform_srcs) > 0 - # TODO: check that DensePose transformation data is the same for - # all the datasets. Otherwise one would have to pass DB ID with - # each entry to select proper transformation data. For now, since - # all DensePose annotated data uses the same data semantics, we - # omit this check. - densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0]) - self.densepose_transform_data = DensePoseTransformData.load( - densepose_transform_data_fpath - ) - - self.is_train = is_train - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. 
- - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - image = utils.read_image(dataset_dict["file_name"], format=self.img_format) - utils.check_image_size(dataset_dict, image) - - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - image_shape = image.shape[:2] # h, w - dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32")) - - if not self.is_train: - dataset_dict.pop("annotations", None) - return dataset_dict - - for anno in dataset_dict["annotations"]: - if not self.mask_on: - anno.pop("segmentation", None) - if not self.keypoint_on: - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - # USER: Don't call transpose_densepose if you don't need - annos = [ - self._transform_densepose( - utils.transform_instance_annotations( - obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices - ), - transforms, - ) - for obj in dataset_dict.pop("annotations") - if obj.get("iscrowd", 0) == 0 - ] - instances = utils.annotations_to_instances(annos, image_shape) - - if len(annos) and "densepose" in annos[0]: - gt_densepose = [obj["densepose"] for obj in annos] - instances.gt_densepose = DensePoseList(gt_densepose, instances.gt_boxes, image_shape) - - dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()] - return dataset_dict - - def _transform_densepose(self, annotation, transforms): - if not self.densepose_on: - return annotation - - # Handle densepose annotations - is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation) - if is_valid: - densepose_data = DensePoseDataRelative(annotation, cleanup=True) - densepose_data.apply_transform(transforms, self.densepose_transform_data) - annotation["densepose"] = densepose_data - else: - # logger = logging.getLogger(__name__) - # logger.debug("Could not load DensePose annotation: {}".format(reason_not_valid)) - DensePoseDataRelative.cleanup_annotation(annotation) - # NOTE: annotations for certain instances may be unavailable. - # 'None' is accepted by the DensePostList data structure. - annotation["densepose"] = None - return annotation diff --git a/spaces/CVPR/LIVE/model_download/yolov5_model_p6_all.sh b/spaces/CVPR/LIVE/model_download/yolov5_model_p6_all.sh deleted file mode 100644 index dfe8d9014e46cf8f7df244095d0115df55e0a209..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/model_download/yolov5_model_p6_all.sh +++ /dev/null @@ -1,8 +0,0 @@ -cd ./yolov5 - -# 下载YOLOv5模型 -wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n6.pt -wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s6.pt -wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m6.pt -wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l6.pt -wget -c -t 0 https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x6.pt \ No newline at end of file diff --git a/spaces/CVPR/LIVE/thrust/internal/test/thrust_nightly.pl b/spaces/CVPR/LIVE/thrust/internal/test/thrust_nightly.pl deleted file mode 100644 index 61e03bda4b7ca6a34fbf63bfc4383d6dbfe60445..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/internal/test/thrust_nightly.pl +++ /dev/null @@ -1,600 +0,0 @@ -#! 
/usr/bin/perl - -############################################################################### -# Copyright (c) 2018 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -############################################################################### - -use strict; -use warnings; - -print(`perl --version`); - -use Getopt::Long; -use Cwd; -use Cwd "abs_path"; -use Config; # For signal names and numbers. -use IPC::Open2; -use File::Temp; -use POSIX "strftime"; - -my $have_time_hi_res = 0; - -if (eval { require Time::HiRes }) -{ - printf("#### CONFIG timestamp `gettimeofday`\n"); - - import Time::HiRes "gettimeofday"; - - $have_time_hi_res = 1; -} else { - printf("#### CONFIG timestamp `time`\n"); -} - -sub timestamp() -{ - if ($have_time_hi_res) { - return gettimeofday(); - } else { - return time(); - } -} - -my %CmdLineOption; -my $arch = ""; -my $abi = ""; -my $os = ""; -my $build = "release"; -my $bin_path; -my $filecheck_path; -my $filecheck_data_path = "internal/test"; -my $timeout_min = 15; - -# https://stackoverflow.com/questions/29862178/name-of-signal-number-2 -my @sig_names; -@sig_names[ split ' ', $Config{sig_num} ] = split ' ', $Config{sig_name}; -my %sig_nums; -@sig_nums{ split ' ', $Config{sig_name} } = split ' ', $Config{sig_num}; - -if (`uname` =~ m/CYGWIN/) { - $os = "win32"; -} elsif ($^O eq "MSWin32") { - $os = "win32"; -} else { - $os = `uname`; - chomp($os); -} - -if ($os eq "win32") { - $ENV{'PROCESSOR_ARCHITECTURE'} ||= ""; - $ENV{'PROCESSOR_ARCHITEW6432'} ||= ""; - - if ((lc($ENV{PROCESSOR_ARCHITECTURE}) ne "x86") || - (lc($ENV{PROCESSOR_ARCHITECTURE}) eq "amd64") || - (lc($ENV{PROCESSOR_ARCHITEW6432}) eq "amd64")) { - $arch = "x86_64"; - } else { - $arch = "i686"; - } -} else { - $arch = `uname -m`; - chomp($arch); -} - -sub usage() -{ - printf("Usage: thrust_nightly.pl \n"); - printf("Options:\n"); - printf(" -help : Print help message\n"); - printf(" -forcearch : i686|x86_64|ARMv7|aarch64 (default: $arch)\n"); - printf(" -forceabi : Specify abi to be used for arm (gnueabi|gnueabihf)\n"); - printf(" -forceos : win32|Linux|Darwin (default: $os)\n"); - printf(" -build : (default: debug)\n"); - printf(" -bin-path : Specify location of test binaries\n"); - printf(" -filecheck-path : Specify location of filecheck binary\n"); - printf(" -filecheck-data-path : Specify location of filecheck data (default: $filecheck_data_path)\n"); - printf(" -timeout-min : timeout in minutes for each individual test\n"); -} - -GetOptions(\%CmdLineOption, - 'help' => sub { usage() and exit 0 }, - "forcearch=s" => \$arch, - "forceabi=s" => \$abi, - "forceos=s" => \$os, - "build=s" => \$build, - "bin-path=s" => \$bin_path, - "filecheck-path=s" => \$filecheck_path, - "filecheck-data-path=s" => \$filecheck_data_path, - "timeout-min=i" => \$timeout_min, - ); - -my $pwd = getcwd(); -my $bin_path_root = abs_path ("${pwd}/.."); - -if ($arch eq "ARMv7") { - if ($abi eq "") { - $abi = "_gnueabi"; #Use default abi for arm if not specified - } - else { - $abi = "_${abi}"; - } -} -else { - 
$abi = ""; #Ignore abi for architectures other than arm -} - -my $uname = ""; -$uname = $arch; -chomp($uname); - -if (not $bin_path) { - $bin_path = "${bin_path_root}/bin/${uname}_${os}${abi}_${build}"; -} - -if (not $filecheck_path) { - $filecheck_path = "${bin_path}/nvvm/tools"; -} - -sub process_return_code { - my ($name, $ret, $msg) = @_; - - if ($ret != 0) { - my $signal = $ret & 127; - my $app_exit = $ret >> 8; - my $dumped_core = $ret & 0x80; - if (($app_exit != 0) && ($app_exit != 0)) { - if ($msg ne "") { - printf("#### ERROR $name exited with return value $app_exit. $msg\n"); - } else { - printf("#### ERROR $name exited with return value $app_exit.\n"); - } - } - if ($signal != 0) { - if ($msg ne "") { - printf("#### ERROR $name received signal SIG$sig_names[$signal] ($signal). $msg\n"); - } else { - printf("#### ERROR $name received signal SIG$sig_names[$signal] ($signal).\n"); - } - if ($sig_nums{'INT'} eq $signal) { - die("Terminating testing due to SIGINT."); - } - } - if ($dumped_core != 0) { - if ($msg ne "") { - printf("#### ERROR $name generated a core dump. $msg\n"); - } else { - printf("#### ERROR $name generated a core dump.\n"); - } - } - } -} - -my $have_filecheck = 1; - -sub filecheck_sanity { - my $filecheck_cmd = "$filecheck_path/FileCheck $filecheck_data_path/thrust.sanity.filecheck"; - - my $filecheck_pid = open(my $filecheck_stdin, "|-", "$filecheck_cmd 2>&1"); - - print $filecheck_stdin "SANITY"; - - my $filecheck_ret = 0; - if (close($filecheck_stdin) == 0) - { - $filecheck_ret = $?; - } - - if ($filecheck_ret == 0) { - printf("#### SANE FileCheck\n"); - } else { - # Use a temporary file to send the output to - # FileCheck so we can get the output this time, - # because Perl and bidirectional pipes suck. - my $tmp = File::Temp->new(); - my $tmp_filename = $tmp->filename; - print $tmp "SANITY"; - - printf("********************************************************************************\n"); - print `$filecheck_cmd -input-file $tmp_filename`; - printf("********************************************************************************\n"); - - process_return_code("FileCheck Sanity", $filecheck_ret, ""); - printf("#### INSANE FileCheck\n"); - - $have_filecheck = 0; - } -} - -# Wrapper for system that logs the commands so you can see what it did -sub run_cmd { - my ($cmd) = @_; - my $ret = 0; - my @executable; - my @output; - my $syst_cmd; - - my $start = timestamp(); - eval { - local $SIG{ALRM} = sub { die("Command timed out (received SIGALRM).\n") }; - alarm (60 * $timeout_min); - $syst_cmd = $cmd; - - @executable = split(' ', $syst_cmd, 2); - - open(my $child, "-|", "$syst_cmd") or die("Could not execute $syst_cmd.\n"); - - if ($child) - { - @output = <$child>; - } - - if (close($child) == 0) - { - $ret = $?; - } - - alarm 0; - }; - my $elapsed = timestamp() - $start; - - if ($@) { - printf("\n#### ERROR Command timeout reached, killing $executable[0].\n"); - system("killall ".$executable[0]); - return ($sig_nums{'KILL'}, $elapsed, @output); - } - - return ($ret, $elapsed, @output); -} - -sub current_time -{ - return strftime("%x %X %Z", localtime()); -} - -my $failures = 0; -my $known_failures = 0; -my $errors = 0; -my $passes = 0; - -sub run_examples { - # Get list of tests in binary folder. 
- my $dir = cwd(); - chdir $bin_path; - my @examplelist; - if ($os eq "win32") - { - @examplelist = glob('thrust.example.*.exe'); - } else { - @examplelist = glob('thrust.example.*'); - } - - chdir $dir; - - my $test; - foreach $test (@examplelist) - { - my $test_exe = $test; - - # Ignore FileCheck files. - if ($test =~ /[.]filecheck$/) - { - next; - } - - if ($os eq "win32") - { - $test =~ s/\.exe//g; - } - - # Check the test actually exists. - if (!-e "${bin_path}/${test_exe}") - { - next; - } - - my $cmd = "${bin_path}/${test_exe} --verbose 2>&1"; - - printf("&&&& RUNNING $test\n"); - printf("#### CURRENT_TIME " . current_time() . "\n"); - - my ($ret, $elapsed, @output) = run_cmd($cmd); - - printf("********************************************************************************\n"); - print @output; - printf("********************************************************************************\n"); - - if ($ret != 0) { - process_return_code($test, $ret, "Example crash?"); - printf("&&&& FAILED $test\n"); - printf("#### WALLTIME $test %.2f [s]\n", $elapsed); - $errors = $errors + 1; - } else { - printf("&&&& PASSED $test\n"); - printf("#### WALLTIME $test %.2f [s]\n", $elapsed); - $passes = $passes + 1; - - if ($have_filecheck) { - # Check output with LLVM FileCheck. - - printf("&&&& RUNNING FileCheck $test\n"); - - if (-f "${filecheck_data_path}/${test}.filecheck") { - # If the filecheck file is empty, don't use filecheck, just - # check if the output file is also empty. - if (-z "${filecheck_data_path}/${test}.filecheck") { - if (join("", @output) eq "") { - printf("&&&& PASSED FileCheck $test\n"); - $passes = $passes + 1; - } else { - printf("#### ERROR Output received but not expected.\n"); - printf("&&&& FAILED FileCheck $test\n"); - $failures = $failures + 1; - } - } else { - my $filecheck_cmd = "$filecheck_path/FileCheck $filecheck_data_path/$test.filecheck"; - - my $filecheck_pid = open(my $filecheck_stdin, "|-", "$filecheck_cmd 2>&1"); - - print $filecheck_stdin @output; - - my $filecheck_ret = 0; - if (close($filecheck_stdin) == 0) - { - $filecheck_ret = $?; - } - - if ($filecheck_ret == 0) { - printf("&&&& PASSED FileCheck $test\n"); - $passes = $passes + 1; - } else { - # Use a temporary file to send the output to - # FileCheck so we can get the output this time, - # because Perl and bidirectional pipes suck. - my $tmp = File::Temp->new(); - my $tmp_filename = $tmp->filename; - print $tmp @output; - - printf("********************************************************************************\n"); - print `$filecheck_cmd -input-file $tmp_filename`; - printf("********************************************************************************\n"); - - process_return_code("FileCheck $test", $filecheck_ret, ""); - printf("&&&& FAILED FileCheck $test\n"); - $failures = $failures + 1; - } - } - } else { - printf("#### ERROR $test has no FileCheck comparison.\n"); - printf("&&&& FAILED FileCheck $test\n"); - $errors = $errors + 1; - } - } - } - printf("\n"); - } -} - -sub run_unit_tests { - # Get list of tests in binary folder. - my $dir = cwd(); - chdir $bin_path; - my @unittestlist; - if ($os eq "win32") - { - @unittestlist = glob('thrust.test.*.exe'); - } else { - @unittestlist = glob('thrust.test.*'); - } - chdir $dir; - - my $test; - foreach $test (@unittestlist) - { - my $test_exe = $test; - - # Ignore FileCheck files. - if ($test =~ /[.]filecheck$/) - { - next; - } - - if ($os eq "win32") - { - $test =~ s/\.exe//g; - } - - # Check the test actually exists. 
- if (!-e "${bin_path}/${test_exe}") - { - next; - } - - # Check the test actually exists - next unless (-e "${bin_path}/${test_exe}"); - - my $cmd = "${bin_path}/${test_exe} --verbose 2>&1"; - - printf("&&&& RUNNING $test\n"); - printf("#### CURRENT_TIME " . current_time() . "\n"); - - my ($ret, $elapsed, @output) = run_cmd($cmd); - - printf("********************************************************************************\n"); - print @output; - printf("********************************************************************************\n"); - my $fail = 0; - my $known_fail = 0; - my $error = 0; - my $pass = 0; - my $found_totals = 0; - foreach my $line (@output) - { - if (($fail, $known_fail, $error, $pass) = $line =~ /Totals: ([0-9]+) failures, ([0-9]+) known failures, ([0-9]+) errors, and ([0-9]+) passes[.]/igs) { - $found_totals = 1; - $failures = $failures + $fail; - $known_failures = $known_failures + $known_fail; - $errors = $errors + $error; - $passes = $passes + $pass; - last; - } else { - $fail = 0; - $known_fail = 0; - $error = 0; - $pass = 0; - } - } - if ($ret == 0) { - if ($found_totals == 0) { - $errors = $errors + 1; - printf("#### ERROR $test returned 0 and no summary line was found. Invalid test?\n"); - printf("&&&& FAILED $test\n"); - printf("#### WALLTIME $test %.2f [s]\n", $elapsed); - } - else { - if ($fail != 0 or $error != 0) { - $errors = $errors + 1; - printf("#### ERROR $test returned 0 and had failures or errors. Test driver error?\n"); - printf("&&&& FAILED $test\n"); - printf("#### WALLTIME $test %.2f [s]\n", $elapsed); - } elsif ($known_fail == 0 and $pass == 0) { - printf("#### DISABLED $test returned 0 and had no failures, known failures, errors or passes.\n"); - printf("&&&& PASSED $test\n"); - printf("#### WALLTIME $test %.2f [s]\n", $elapsed); - } else { - printf("&&&& PASSED $test\n"); - printf("#### WALLTIME $test %.2f [s]\n", $elapsed); - - if ($have_filecheck) { - # Check output with LLVM FileCheck if the test has a FileCheck input. - - if (-f "${filecheck_data_path}/${test}.filecheck") { - printf("&&&& RUNNING FileCheck $test\n"); - - # If the filecheck file is empty, don't use filecheck, - # just check if the output file is also empty. - if (! -z "${filecheck_data_path}/${test}.filecheck") { - if (@output) { - printf("&&&& PASSED FileCheck $test\n"); - $passes = $passes + 1; - } else { - printf("#### Output received but not expected.\n"); - printf("&&&& FAILED FileCheck $test\n"); - $failures = $failures + 1; - } - } else { - my $filecheck_cmd = "$filecheck_path/FileCheck $filecheck_data_path/$test.filecheck"; - - my $filecheck_pid = open(my $filecheck_stdin, "|-", "$filecheck_cmd 2>&1"); - - print $filecheck_stdin @output; - - my $filecheck_ret = 0; - if (close($filecheck_stdin) == 0) - { - $filecheck_ret = $?; - } - - if ($filecheck_ret == 0) { - printf("&&&& PASSED FileCheck $test\n"); - $passes = $passes + 1; - } else { - # Use a temporary file to send the output to - # FileCheck so we can get the output this time, - # because Perl and bidirectional pipes suck. 
- my $tmp = File::Temp->new(); - my $tmp_filename = $tmp->filename; - print $tmp @output; - - printf("********************************************************************************\n"); - print `$filecheck_cmd -input-file $tmp_filename`; - printf("********************************************************************************\n"); - - process_return_code("FileCheck $test", $filecheck_ret, ""); - printf("&&&& FAILED FileCheck $test\n"); - $failures = $failures + 1; - } - } - } - } - } - } - } else { - $errors = $errors + 1; - process_return_code($test, $ret, "Test crash?"); - printf("&&&& FAILED $test\n"); - printf("#### WALLTIME $test %.2f [s]\n", $elapsed); - } - printf("\n"); - } -} - -sub dvs_summary { - my $dvs_score = 0; - my $denominator = $failures + $known_failures + $errors + $passes; - if ($denominator == 0) { - $dvs_score = 0; - } - else { - $dvs_score = 100 * (($passes + $known_failures) / $denominator); - } - - printf("\n"); - - printf("%*%*%*%* FA!LUR3S $failures\n"); - printf("%*%*%*%* KN0WN FA!LUR3S $known_failures\n"); - printf("%*%*%*%* 3RR0RS $errors\n"); - printf("%*%*%*%* PASS3S $passes\n"); - - printf("\n"); - - printf("CUDA DVS BASIC SANITY SCORE : %.1f\n", $dvs_score); - - if ($failures + $errors > 0) { - exit(1); - } -} - -############################################################################### - -printf("#### CONFIG arch `%s`\n", $arch); -printf("#### CONFIG abi `%s`\n", $abi); -printf("#### CONFIG os `%s`\n", $os); -printf("#### CONFIG build `%s`\n", $build); -printf("#### CONFIG bin_path `%s`\n", $bin_path); -printf("#### CONFIG have_filecheck `$have_filecheck`\n"); -printf("#### CONFIG filecheck_path `%s`\n", $filecheck_path); -printf("#### CONFIG filecheck_data_path `%s`\n", $filecheck_data_path); -printf("#### CONFIG have_time_hi_res `$have_time_hi_res`\n"); -printf("#### CONFIG timeout_min `%s`\n", $timeout_min); -printf("#### ENV PATH `%s`\n", defined $ENV{'PATH'} ? $ENV{'PATH'} : ''); -printf("#### ENV LD_LIBRARY_PATH `%s`\n", defined $ENV{'LD_LIBRARY_PATH'} ? $ENV{'LD_LIBRARY_PATH'} : ''); - -printf("\n"); - -filecheck_sanity(); - -printf("\n"); - -my $START_TIME = current_time(); - -run_examples(); -run_unit_tests(); - -my $STOP_TIME = current_time(); - -printf("#### START_TIME $START_TIME\n"); -printf("#### STOP_TIME $STOP_TIME\n"); - -dvs_summary(); - diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/iterator/is_output_iterator.h b/spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/iterator/is_output_iterator.h deleted file mode 100644 index d6801305be01b903d7a3b9a8bd45101f709543f4..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/iterator/is_output_iterator.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include -#include -#include -#include - -namespace thrust -{ - -namespace detail -{ - - -template - struct is_void_like - : thrust::detail::or_< - thrust::detail::is_void, - thrust::detail::is_same - > -{}; // end is_void_like - - -template - struct lazy_is_void_like - : is_void_like -{}; // end lazy_is_void_like - - -// XXX this meta function should first check that T is actually an iterator -// -// if thrust::iterator_value is defined and thrust::iterator_value::type == void -// return false -// else -// return true -template - struct is_output_iterator - : eval_if< - is_metafunction_defined >::value, - lazy_is_void_like >, - thrust::detail::true_type - >::type -{ -}; // end is_output_iterator - -} // end detail - -} // end thrust - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/extrema.h b/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/extrema.h deleted file mode 100644 index 5fbb8c55c287cb15330d991376c3c10d75829f25..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/extrema.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// this system inherits extrema algorithms -#include - diff --git a/spaces/Chaitanya01/InvestingPlatform/setup.sh b/spaces/Chaitanya01/InvestingPlatform/setup.sh deleted file mode 100644 index c8650a8b74a58d9a5f53b185fd711c5668e1cd52..0000000000000000000000000000000000000000 --- a/spaces/Chaitanya01/InvestingPlatform/setup.sh +++ /dev/null @@ -1,13 +0,0 @@ -mkdir -p ~/.streamlit/ - -echo "\ -[general]\n\ -email = \"your-email@domain.com\"\n\ -" > ~/.streamlit/credentials.toml - -echo "\ -[server]\n\ -headless = true\n\ -enableCORS=false\n\ -port = $PORT\n\ -" > ~/.streamlit/config.toml \ No newline at end of file diff --git a/spaces/Chomkwoy/Nilkessye/cpool_new/setup.py b/spaces/Chomkwoy/Nilkessye/cpool_new/setup.py deleted file mode 100644 index 254eb0e67425f3fb5719a562c17fe7bc26d8debb..0000000000000000000000000000000000000000 --- a/spaces/Chomkwoy/Nilkessye/cpool_new/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -from setuptools import setup -from torch.utils.cpp_extension import BuildExtension, CppExtension - -setup(name="cpools", - ext_modules=[ - CppExtension("top_pool", ["src/top_pool.cpp"]), - CppExtension("bottom_pool", ["src/bottom_pool.cpp"]), - CppExtension("left_pool", ["src/left_pool.cpp"]), - CppExtension("right_pool", ["src/right_pool.cpp"]) - ], - cmdclass={ - "build_ext": BuildExtension - } - ) diff --git a/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/admin.py b/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/admin.py deleted file mode 100644 index 8c38f3f3dad51e4585f3984282c2a4bec5349c1e..0000000000000000000000000000000000000000 --- a/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/admin.py +++ /dev/null @@ -1,3 +0,0 @@ -from django.contrib import 
admin - -# Register your models here. diff --git a/spaces/CognitiveLabs/Research-Assistant/statics/style.py b/spaces/CognitiveLabs/Research-Assistant/statics/style.py deleted file mode 100644 index caaee0acc5b20c4ea9d5795c64fa5125f3da6de0..0000000000000000000000000000000000000000 --- a/spaces/CognitiveLabs/Research-Assistant/statics/style.py +++ /dev/null @@ -1,117 +0,0 @@ -css = """ - .top-bar { - padding-bottom: 10px; - background-color: transparent; - } - - .top-bar .in-bar-title { - background-image: linear-gradient(45deg, #8B5FBF, #D6C6E1, #ffffff); - -webkit-background-clip: text; - background-clip: text; - -webkit-text-fill-color: transparent; - font-family: Gelion, "Open Sans", Helvetica, "Helvetica Neue", Arial; - font-size: 2rem; - font-weight: bold; - text-align: left; - display: block; - } - - .top-bar .in-bar-subtitle { - font-family: 'Crimson Pro'; - color: #878787; - font-size: 1.4rem; - margin-top: -5px; - display: block; - } - - .main { - max-width: 800px; - min-width: min(100%, 800px); - align-self: center; - } - - .output { - padding: 10px; - min-height: 300px; - border: 1.5px solid #AC7DD280; - border-radius: 10px; - margin-bottom: 10px; - transition: opacity .1s ease-in-out; - background: var(--block-background-fill); - } - - #history { - padding: 10px !important; - border: 1.5px dashed #AC7DD2 !important; - border-radius: 10px !important; - } - - #primary-btn { - border: 1.5px solid #AC7DD2; - font-size: 20px; - } - - summary { - font-size: 14px; - font-weight: bold; - } - - #history_box { - border-bottom: 1.5px dashed #9A73B5; - padding: 10px; - } - - .tab-nav { - border-bottom: 1.5px solid #9A73B5 !important; - } - - button.selected { - border: 1.5px solid #9A73B5 !important; - border-bottom: none !important; - } - - .tabitem { - border: 1.5px solid #9A73B5 !important; - border-top: none !important; - } -""" - -# #809A73B5 - -top_bar = """ - - - - - - - -
-        <span class="in-bar-title">AI Research Assistant</span>
-        <span class="in-bar-subtitle">Your personal free GPT researcher</span>
      - -""" - -report_html = """ - # Report -""" - -english_polishing_html = """ - # Polished Result -""" - -history_result_html = """ - # History Result -""" - -literature_review_html = """ - under construction... -""" \ No newline at end of file diff --git a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/python/dqn/__init__.py b/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/python/dqn/__init__.py deleted file mode 100644 index 4ae42872c812a7c8a18dff002086c7e6e935f580..0000000000000000000000000000000000000000 --- a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/python/dqn/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from stable_baselines3.dqn.dqn import DQN -from stable_baselines3.dqn.policies import CnnPolicy, MlpPolicy diff --git a/spaces/Cropinky/gpt2-rap-songs/README.md b/spaces/Cropinky/gpt2-rap-songs/README.md deleted file mode 100644 index 55d6ea113508466b1672e4b604f52ac122fa563d..0000000000000000000000000000000000000000 --- a/spaces/Cropinky/gpt2-rap-songs/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Gpt2 Rap Song generator -emoji: 🎤 -colorFrom: red -colorTo: black -sdk: streamlit -app_file: app.py -pinned: true ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/processing_utils.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/processing_utils.py deleted file mode 100644 index e4fecdf4fe0bd7cf4a60b357aa56f1ebca26a40c..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/processing_utils.py +++ /dev/null @@ -1,546 +0,0 @@ -from __future__ import annotations - -import base64 -import json -import logging -import os -import shutil -import subprocess -import tempfile -import warnings -from io import BytesIO -from pathlib import Path - -import numpy as np -from gradio_client import utils as client_utils -from PIL import Image, ImageOps, PngImagePlugin - -from gradio import wasm_utils - -if not wasm_utils.IS_WASM: - # TODO: Support ffmpeg on Wasm - from ffmpy import FFmpeg, FFprobe, FFRuntimeError - -with warnings.catch_warnings(): - warnings.simplefilter("ignore") # Ignore pydub warning if ffmpeg is not installed - from pydub import AudioSegment - -log = logging.getLogger(__name__) - -######################### -# GENERAL -######################### - - -def to_binary(x: str | dict) -> bytes: - """Converts a base64 string or dictionary to a binary string that can be sent in a POST.""" - if isinstance(x, dict): - if x.get("data"): - base64str = x["data"] - else: - base64str = client_utils.encode_url_or_file_to_base64(x["name"]) - else: - base64str = x - return base64.b64decode(extract_base64_data(base64str)) - - -def extract_base64_data(x: str) -> str: - """Just extracts the base64 data from a general base64 string.""" - return x.rsplit(",", 1)[-1] - - -######################### -# IMAGE PRE-PROCESSING -######################### - - -def decode_base64_to_image(encoding: str) -> Image.Image: - image_encoded = extract_base64_data(encoding) - img = Image.open(BytesIO(base64.b64decode(image_encoded))) - try: - if hasattr(ImageOps, "exif_transpose"): - img = ImageOps.exif_transpose(img) - except Exception: - log.warning( - "Failed to transpose image %s based on EXIF data.", - img, - exc_info=True, - ) - return img - - -def encode_plot_to_base64(plt): - with BytesIO() as output_bytes: - plt.savefig(output_bytes, format="png") - bytes_data = output_bytes.getvalue() - base64_str = str(base64.b64encode(bytes_data), "utf-8") - return "data:image/png;base64," + base64_str - - -def get_pil_metadata(pil_image): - # Copy any text-only metadata - metadata = PngImagePlugin.PngInfo() - for key, value in pil_image.info.items(): - if isinstance(key, str) and isinstance(value, str): - metadata.add_text(key, value) - - return metadata - - -def encode_pil_to_bytes(pil_image, format="png"): - with BytesIO() as output_bytes: - pil_image.save(output_bytes, format, pnginfo=get_pil_metadata(pil_image)) - return output_bytes.getvalue() - - -def encode_pil_to_base64(pil_image): - bytes_data = encode_pil_to_bytes(pil_image) - base64_str = str(base64.b64encode(bytes_data), "utf-8") - return "data:image/png;base64," + base64_str - - -def encode_array_to_base64(image_array): - with BytesIO() as output_bytes: - pil_image = Image.fromarray(_convert(image_array, np.uint8, force_copy=False)) - pil_image.save(output_bytes, "PNG") - bytes_data = output_bytes.getvalue() - base64_str = str(base64.b64encode(bytes_data), "utf-8") - return "data:image/png;base64," + base64_str - - -def resize_and_crop(img, size, crop_type="center"): - """ - Resize and crop an image to fit the specified size. - args: - size: `(width, height)` tuple. 
Pass `None` for either width or height - to only crop and resize the other. - crop_type: can be 'top', 'middle' or 'bottom', depending on this - value, the image will cropped getting the 'top/left', 'middle' or - 'bottom/right' of the image to fit the size. - raises: - ValueError: if an invalid `crop_type` is provided. - """ - if crop_type == "top": - center = (0, 0) - elif crop_type == "center": - center = (0.5, 0.5) - else: - raise ValueError - - resize = list(size) - if size[0] is None: - resize[0] = img.size[0] - if size[1] is None: - resize[1] = img.size[1] - return ImageOps.fit(img, resize, centering=center) # type: ignore - - -################## -# Audio -################## - - -def audio_from_file(filename, crop_min=0, crop_max=100): - try: - audio = AudioSegment.from_file(filename) - except FileNotFoundError as e: - isfile = Path(filename).is_file() - msg = ( - f"Cannot load audio from file: `{'ffprobe' if isfile else filename}` not found." - + " Please install `ffmpeg` in your system to use non-WAV audio file formats" - " and make sure `ffprobe` is in your PATH." - if isfile - else "" - ) - raise RuntimeError(msg) from e - if crop_min != 0 or crop_max != 100: - audio_start = len(audio) * crop_min / 100 - audio_end = len(audio) * crop_max / 100 - audio = audio[audio_start:audio_end] - data = np.array(audio.get_array_of_samples()) - if audio.channels > 1: - data = data.reshape(-1, audio.channels) - return audio.frame_rate, data - - -def audio_to_file(sample_rate, data, filename, format="wav"): - if format == "wav": - data = convert_to_16_bit_wav(data) - audio = AudioSegment( - data.tobytes(), - frame_rate=sample_rate, - sample_width=data.dtype.itemsize, - channels=(1 if len(data.shape) == 1 else data.shape[1]), - ) - file = audio.export(filename, format=format) - file.close() # type: ignore - - -def convert_to_16_bit_wav(data): - # Based on: https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.wavfile.write.html - warning = "Trying to convert audio automatically from {} to 16-bit int format." - if data.dtype in [np.float64, np.float32, np.float16]: - warnings.warn(warning.format(data.dtype)) - data = data / np.abs(data).max() - data = data * 32767 - data = data.astype(np.int16) - elif data.dtype == np.int32: - warnings.warn(warning.format(data.dtype)) - data = data / 65538 - data = data.astype(np.int16) - elif data.dtype == np.int16: - pass - elif data.dtype == np.uint16: - warnings.warn(warning.format(data.dtype)) - data = data - 32768 - data = data.astype(np.int16) - elif data.dtype == np.uint8: - warnings.warn(warning.format(data.dtype)) - data = data * 257 - 32768 - data = data.astype(np.int16) - else: - raise ValueError( - "Audio data cannot be converted automatically from " - f"{data.dtype} to 16-bit int format." - ) - return data - - -################## -# OUTPUT -################## - - -def _convert(image, dtype, force_copy=False, uniform=False): - """ - Adapted from: https://github.com/scikit-image/scikit-image/blob/main/skimage/util/dtype.py#L510-L531 - - Convert an image to the requested data-type. - Warnings are issued in case of precision loss, or when negative values - are clipped during conversion to unsigned integer types (sign loss). - Floating point values are expected to be normalized and will be clipped - to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or - signed integers respectively. - Numbers are not shifted to the negative side when converting from - unsigned to signed integer types. 
Negative values will be clipped when - converting to unsigned integers. - Parameters - ---------- - image : ndarray - Input image. - dtype : dtype - Target data-type. - force_copy : bool, optional - Force a copy of the data, irrespective of its current dtype. - uniform : bool, optional - Uniformly quantize the floating point range to the integer range. - By default (uniform=False) floating point values are scaled and - rounded to the nearest integers, which minimizes back and forth - conversion errors. - .. versionchanged :: 0.15 - ``_convert`` no longer warns about possible precision or sign - information loss. See discussions on these warnings at: - https://github.com/scikit-image/scikit-image/issues/2602 - https://github.com/scikit-image/scikit-image/issues/543#issuecomment-208202228 - https://github.com/scikit-image/scikit-image/pull/3575 - References - ---------- - .. [1] DirectX data conversion rules. - https://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx - .. [2] Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25", - pp 7-8. Khronos Group, 2010. - .. [3] Proper treatment of pixels as integers. A.W. Paeth. - In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990. - .. [4] Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels", - pp 47-57. Morgan Kaufmann, 1998. - """ - dtype_range = { - bool: (False, True), - np.bool_: (False, True), - np.bool8: (False, True), - float: (-1, 1), - np.float_: (-1, 1), - np.float16: (-1, 1), - np.float32: (-1, 1), - np.float64: (-1, 1), - } - - def _dtype_itemsize(itemsize, *dtypes): - """Return first of `dtypes` with itemsize greater than `itemsize` - Parameters - ---------- - itemsize: int - The data type object element size. - Other Parameters - ---------------- - *dtypes: - Any Object accepted by `np.dtype` to be converted to a data - type object - Returns - ------- - dtype: data type object - First of `dtypes` with itemsize greater than `itemsize`. - """ - return next(dt for dt in dtypes if np.dtype(dt).itemsize >= itemsize) - - def _dtype_bits(kind, bits, itemsize=1): - """Return dtype of `kind` that can store a `bits` wide unsigned int - Parameters: - kind: str - Data type kind. - bits: int - Desired number of bits. - itemsize: int - The data type object element size. - Returns - ------- - dtype: data type object - Data type of `kind` that can store a `bits` wide unsigned int - """ - - s = next( - i - for i in (itemsize,) + (2, 4, 8) - if bits < (i * 8) or (bits == (i * 8) and kind == "u") - ) - - return np.dtype(kind + str(s)) - - def _scale(a, n, m, copy=True): - """Scale an array of unsigned/positive integers from `n` to `m` bits. - Numbers can be represented exactly only if `m` is a multiple of `n`. - Parameters - ---------- - a : ndarray - Input image array. - n : int - Number of bits currently used to encode the values in `a`. - m : int - Desired number of bits to encode the values in `out`. - copy : bool, optional - If True, allocates and returns new array. Otherwise, modifies - `a` in place. - Returns - ------- - out : array - Output image array. Has the same kind as `a`. 
- """ - kind = a.dtype.kind - if n > m and a.max() < 2**m: - return a.astype(_dtype_bits(kind, m)) - elif n == m: - return a.copy() if copy else a - elif n > m: - # downscale with precision loss - if copy: - b = np.empty(a.shape, _dtype_bits(kind, m)) - np.floor_divide(a, 2 ** (n - m), out=b, dtype=a.dtype, casting="unsafe") - return b - else: - a //= 2 ** (n - m) - return a - elif m % n == 0: - # exact upscale to a multiple of `n` bits - if copy: - b = np.empty(a.shape, _dtype_bits(kind, m)) - np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype) - return b - else: - a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False) - a *= (2**m - 1) // (2**n - 1) - return a - else: - # upscale to a multiple of `n` bits, - # then downscale with precision loss - o = (m // n + 1) * n - if copy: - b = np.empty(a.shape, _dtype_bits(kind, o)) - np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype) - b //= 2 ** (o - m) - return b - else: - a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False) - a *= (2**o - 1) // (2**n - 1) - a //= 2 ** (o - m) - return a - - image = np.asarray(image) - dtypeobj_in = image.dtype - dtypeobj_out = np.dtype("float64") if dtype is np.floating else np.dtype(dtype) - dtype_in = dtypeobj_in.type - dtype_out = dtypeobj_out.type - kind_in = dtypeobj_in.kind - kind_out = dtypeobj_out.kind - itemsize_in = dtypeobj_in.itemsize - itemsize_out = dtypeobj_out.itemsize - - # Below, we do an `issubdtype` check. Its purpose is to find out - # whether we can get away without doing any image conversion. This happens - # when: - # - # - the output and input dtypes are the same or - # - when the output is specified as a type, and the input dtype - # is a subclass of that type (e.g. `np.floating` will allow - # `float32` and `float64` arrays through) - - if np.issubdtype(dtype_in, np.obj2sctype(dtype)): - if force_copy: - image = image.copy() - return image - - if kind_in in "ui": - imin_in = np.iinfo(dtype_in).min - imax_in = np.iinfo(dtype_in).max - if kind_out in "ui": - imin_out = np.iinfo(dtype_out).min # type: ignore - imax_out = np.iinfo(dtype_out).max # type: ignore - - # any -> binary - if kind_out == "b": - return image > dtype_in(dtype_range[dtype_in][1] / 2) - - # binary -> any - if kind_in == "b": - result = image.astype(dtype_out) - if kind_out != "f": - result *= dtype_out(dtype_range[dtype_out][1]) - return result - - # float -> any - if kind_in == "f": - if kind_out == "f": - # float -> float - return image.astype(dtype_out) - - if np.min(image) < -1.0 or np.max(image) > 1.0: - raise ValueError("Images of type float must be between -1 and 1.") - # floating point -> integer - # use float type that can represent output integer type - computation_type = _dtype_itemsize( - itemsize_out, dtype_in, np.float32, np.float64 - ) - - if not uniform: - if kind_out == "u": - image_out = np.multiply(image, imax_out, dtype=computation_type) # type: ignore - else: - image_out = np.multiply( - image, (imax_out - imin_out) / 2, dtype=computation_type # type: ignore - ) - image_out -= 1.0 / 2.0 - np.rint(image_out, out=image_out) - np.clip(image_out, imin_out, imax_out, out=image_out) # type: ignore - elif kind_out == "u": - image_out = np.multiply(image, imax_out + 1, dtype=computation_type) # type: ignore - np.clip(image_out, 0, imax_out, out=image_out) # type: ignore - else: - image_out = np.multiply( - image, (imax_out - imin_out + 1.0) / 2.0, dtype=computation_type # type: ignore - ) - np.floor(image_out, out=image_out) - np.clip(image_out, imin_out, 
imax_out, out=image_out) # type: ignore - return image_out.astype(dtype_out) - - # signed/unsigned int -> float - if kind_out == "f": - # use float type that can exactly represent input integers - computation_type = _dtype_itemsize( - itemsize_in, dtype_out, np.float32, np.float64 - ) - - if kind_in == "u": - # using np.divide or np.multiply doesn't copy the data - # until the computation time - image = np.multiply(image, 1.0 / imax_in, dtype=computation_type) # type: ignore - # DirectX uses this conversion also for signed ints - # if imin_in: - # np.maximum(image, -1.0, out=image) - else: - image = np.add(image, 0.5, dtype=computation_type) - image *= 2 / (imax_in - imin_in) # type: ignore - - return np.asarray(image, dtype_out) - - # unsigned int -> signed/unsigned int - if kind_in == "u": - if kind_out == "i": - # unsigned int -> signed int - image = _scale(image, 8 * itemsize_in, 8 * itemsize_out - 1) - return image.view(dtype_out) - else: - # unsigned int -> unsigned int - return _scale(image, 8 * itemsize_in, 8 * itemsize_out) - - # signed int -> unsigned int - if kind_out == "u": - image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out) - result = np.empty(image.shape, dtype_out) - np.maximum(image, 0, out=result, dtype=image.dtype, casting="unsafe") - return result - - # signed int -> signed int - if itemsize_in > itemsize_out: - return _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out - 1) - - image = image.astype(_dtype_bits("i", itemsize_out * 8)) - image -= imin_in # type: ignore - image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False) - image += imin_out # type: ignore - return image.astype(dtype_out) - - -def ffmpeg_installed() -> bool: - if wasm_utils.IS_WASM: - # TODO: Support ffmpeg in WASM - return False - - return shutil.which("ffmpeg") is not None - - -def video_is_playable(video_filepath: str) -> bool: - """Determines if a video is playable in the browser. - - A video is playable if it has a playable container and codec. - .mp4 -> h264 - .webm -> vp9 - .ogg -> theora - """ - try: - container = Path(video_filepath).suffix.lower() - probe = FFprobe( - global_options="-show_format -show_streams -select_streams v -print_format json", - inputs={video_filepath: None}, - ) - output = probe.run(stderr=subprocess.PIPE, stdout=subprocess.PIPE) - output = json.loads(output[0]) - video_codec = output["streams"][0]["codec_name"] - return (container, video_codec) in [ - (".mp4", "h264"), - (".ogg", "theora"), - (".webm", "vp9"), - ] - # If anything goes wrong, assume the video can be played to not convert downstream - except (FFRuntimeError, IndexError, KeyError): - return True - - -def convert_video_to_playable_mp4(video_path: str) -> str: - """Convert the video to mp4. 
If something goes wrong return the original video.""" - try: - with tempfile.NamedTemporaryFile(delete=False) as tmp_file: - output_path = Path(video_path).with_suffix(".mp4") - shutil.copy2(video_path, tmp_file.name) - # ffmpeg will automatically use h264 codec (playable in browser) when converting to mp4 - ff = FFmpeg( - inputs={str(tmp_file.name): None}, - outputs={str(output_path): None}, - global_options="-y -loglevel quiet", - ) - ff.run() - except FFRuntimeError as e: - print(f"Error converting video to browser-playable format {str(e)}") - output_path = video_path - finally: - # Remove temp file - os.remove(tmp_file.name) # type: ignore - return str(output_path) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/_commit_api.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/_commit_api.py deleted file mode 100644 index 7cbcc0d765b7ecb1c53c9388868c85d15e119141..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/_commit_api.py +++ /dev/null @@ -1,632 +0,0 @@ -""" -Type definitions and utilities for the `create_commit` API -""" -import base64 -import io -import os -import warnings -from collections import defaultdict -from contextlib import contextmanager -from dataclasses import dataclass, field -from itertools import groupby -from pathlib import Path, PurePosixPath -from typing import TYPE_CHECKING, Any, BinaryIO, Dict, Iterable, Iterator, List, Optional, Tuple, Union - -from tqdm.contrib.concurrent import thread_map - -from huggingface_hub import get_session - -from .constants import ENDPOINT, HF_HUB_ENABLE_HF_TRANSFER -from .lfs import UploadInfo, lfs_upload, post_lfs_batch_info -from .utils import ( - EntryNotFoundError, - build_hf_headers, - chunk_iterable, - hf_raise_for_status, - logging, - tqdm_stream_file, - validate_hf_hub_args, -) -from .utils import tqdm as hf_tqdm -from .utils._typing import Literal - - -if TYPE_CHECKING: - from .hf_api import RepoFile - - -logger = logging.get_logger(__name__) - - -UploadMode = Literal["lfs", "regular"] - -# Max is 1,000 per request on the Hub for HfApi.list_files_info -# Otherwise we get: -# HfHubHTTPError: 413 Client Error: Payload Too Large for url: https://huggingface.co/api/datasets/xxx (Request ID: xxx)\n\ntoo many parameters -# See https://github.com/huggingface/huggingface_hub/issues/1503 -FETCH_LFS_BATCH_SIZE = 500 - - -@dataclass -class CommitOperationDelete: - """ - Data structure holding necessary info to delete a file or a folder from a repository - on the Hub. - - Args: - path_in_repo (`str`): - Relative filepath in the repo, for example: `"checkpoints/1fec34a/weights.bin"` - for a file or `"checkpoints/1fec34a/"` for a folder. - is_folder (`bool` or `Literal["auto"]`, *optional*) - Whether the Delete Operation applies to a folder or not. If "auto", the path - type (file or folder) is guessed automatically by looking if path ends with - a "/" (folder) or not (file). To explicitly set the path type, you can set - `is_folder=True` or `is_folder=False`. - """ - - path_in_repo: str - is_folder: Union[bool, Literal["auto"]] = "auto" - - def __post_init__(self): - self.path_in_repo = _validate_path_in_repo(self.path_in_repo) - - if self.is_folder == "auto": - self.is_folder = self.path_in_repo.endswith("/") - if not isinstance(self.is_folder, bool): - raise ValueError( - f"Wrong value for `is_folder`. Must be one of [`True`, `False`, `'auto'`]. Got '{self.is_folder}'." 
- ) - - -@dataclass -class CommitOperationCopy: - """ - Data structure holding necessary info to copy a file in a repository on the Hub. - - Limitations: - - Only LFS files can be copied. To copy a regular file, you need to download it locally and re-upload it - - Cross-repository copies are not supported. - - Note: you can combine a [`CommitOperationCopy`] and a [`CommitOperationDelete`] to rename an LFS file on the Hub. - - Args: - src_path_in_repo (`str`): - Relative filepath in the repo of the file to be copied, e.g. `"checkpoints/1fec34a/weights.bin"`. - path_in_repo (`str`): - Relative filepath in the repo where to copy the file, e.g. `"checkpoints/1fec34a/weights_copy.bin"`. - src_revision (`str`, *optional*): - The git revision of the file to be copied. Can be any valid git revision. - Default to the target commit revision. - """ - - src_path_in_repo: str - path_in_repo: str - src_revision: Optional[str] = None - - def __post_init__(self): - self.src_path_in_repo = _validate_path_in_repo(self.src_path_in_repo) - self.path_in_repo = _validate_path_in_repo(self.path_in_repo) - - -@dataclass -class CommitOperationAdd: - """ - Data structure holding necessary info to upload a file to a repository on the Hub. - - Args: - path_in_repo (`str`): - Relative filepath in the repo, for example: `"checkpoints/1fec34a/weights.bin"` - path_or_fileobj (`str`, `Path`, `bytes`, or `BinaryIO`): - Either: - - a path to a local file (as `str` or `pathlib.Path`) to upload - - a buffer of bytes (`bytes`) holding the content of the file to upload - - a "file object" (subclass of `io.BufferedIOBase`), typically obtained - with `open(path, "rb")`. It must support `seek()` and `tell()` methods. - - Raises: - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - If `path_or_fileobj` is not one of `str`, `Path`, `bytes` or `io.BufferedIOBase`. - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - If `path_or_fileobj` is a `str` or `Path` but not a path to an existing file. - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - If `path_or_fileobj` is a `io.BufferedIOBase` but it doesn't support both - `seek()` and `tell()`. - """ - - path_in_repo: str - path_or_fileobj: Union[str, Path, bytes, BinaryIO] - upload_info: UploadInfo = field(init=False, repr=False) - - def __post_init__(self) -> None: - """Validates `path_or_fileobj` and compute `upload_info`.""" - self.path_in_repo = _validate_path_in_repo(self.path_in_repo) - - # Validate `path_or_fileobj` value - if isinstance(self.path_or_fileobj, Path): - self.path_or_fileobj = str(self.path_or_fileobj) - if isinstance(self.path_or_fileobj, str): - path_or_fileobj = os.path.normpath(os.path.expanduser(self.path_or_fileobj)) - if not os.path.isfile(path_or_fileobj): - raise ValueError(f"Provided path: '{path_or_fileobj}' is not a file on the local file system") - elif not isinstance(self.path_or_fileobj, (io.BufferedIOBase, bytes)): - # ^^ Inspired from: https://stackoverflow.com/questions/44584829/how-to-determine-if-file-is-opened-in-binary-or-text-mode - raise ValueError( - "path_or_fileobj must be either an instance of str, bytes or" - " io.BufferedIOBase. If you passed a file-like object, make sure it is" - " in binary mode." 
- ) - if isinstance(self.path_or_fileobj, io.BufferedIOBase): - try: - self.path_or_fileobj.tell() - self.path_or_fileobj.seek(0, os.SEEK_CUR) - except (OSError, AttributeError) as exc: - raise ValueError( - "path_or_fileobj is a file-like object but does not implement seek() and tell()" - ) from exc - - # Compute "upload_info" attribute - if isinstance(self.path_or_fileobj, str): - self.upload_info = UploadInfo.from_path(self.path_or_fileobj) - elif isinstance(self.path_or_fileobj, bytes): - self.upload_info = UploadInfo.from_bytes(self.path_or_fileobj) - else: - self.upload_info = UploadInfo.from_fileobj(self.path_or_fileobj) - - @contextmanager - def as_file(self, with_tqdm: bool = False) -> Iterator[BinaryIO]: - """ - A context manager that yields a file-like object allowing to read the underlying - data behind `path_or_fileobj`. - - Args: - with_tqdm (`bool`, *optional*, defaults to `False`): - If True, iterating over the file object will display a progress bar. Only - works if the file-like object is a path to a file. Pure bytes and buffers - are not supported. - - Example: - - ```python - >>> operation = CommitOperationAdd( - ... path_in_repo="remote/dir/weights.h5", - ... path_or_fileobj="./local/weights.h5", - ... ) - CommitOperationAdd(path_in_repo='remote/dir/weights.h5', path_or_fileobj='./local/weights.h5') - - >>> with operation.as_file() as file: - ... content = file.read() - - >>> with operation.as_file(with_tqdm=True) as file: - ... while True: - ... data = file.read(1024) - ... if not data: - ... break - config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s] - - >>> with operation.as_file(with_tqdm=True) as file: - ... requests.put(..., data=file) - config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s] - ``` - """ - if isinstance(self.path_or_fileobj, str) or isinstance(self.path_or_fileobj, Path): - if with_tqdm: - with tqdm_stream_file(self.path_or_fileobj) as file: - yield file - else: - with open(self.path_or_fileobj, "rb") as file: - yield file - elif isinstance(self.path_or_fileobj, bytes): - yield io.BytesIO(self.path_or_fileobj) - elif isinstance(self.path_or_fileobj, io.BufferedIOBase): - prev_pos = self.path_or_fileobj.tell() - yield self.path_or_fileobj - self.path_or_fileobj.seek(prev_pos, io.SEEK_SET) - - def b64content(self) -> bytes: - """ - The base64-encoded content of `path_or_fileobj` - - Returns: `bytes` - """ - with self.as_file() as file: - return base64.b64encode(file.read()) - - -def _validate_path_in_repo(path_in_repo: str) -> str: - # Validate `path_in_repo` value to prevent a server-side issue - if path_in_repo.startswith("/"): - path_in_repo = path_in_repo[1:] - if path_in_repo == "." or path_in_repo == ".." or path_in_repo.startswith("../"): - raise ValueError(f"Invalid `path_in_repo` in CommitOperation: '{path_in_repo}'") - if path_in_repo.startswith("./"): - path_in_repo = path_in_repo[2:] - if any(part == ".git" for part in path_in_repo.split("/")): - raise ValueError( - "Invalid `path_in_repo` in CommitOperation: cannot update files under a '.git/' folder (path:" - f" '{path_in_repo}')." - ) - return path_in_repo - - -CommitOperation = Union[CommitOperationAdd, CommitOperationCopy, CommitOperationDelete] - - -def warn_on_overwriting_operations(operations: List[CommitOperation]) -> None: - """ - Warn user when a list of operations is expected to overwrite itself in a single - commit. 
- - Rules: - - If a filepath is updated by multiple `CommitOperationAdd` operations, a warning - message is triggered. - - If a filepath is updated at least once by a `CommitOperationAdd` and then deleted - by a `CommitOperationDelete`, a warning is triggered. - - If a `CommitOperationDelete` deletes a filepath that is then updated by a - `CommitOperationAdd`, no warning is triggered. This is usually useless (no need to - delete before upload) but can happen if a user deletes an entire folder and then - add new files to it. - """ - nb_additions_per_path: Dict[str, int] = defaultdict(int) - for operation in operations: - path_in_repo = operation.path_in_repo - if isinstance(operation, CommitOperationAdd): - if nb_additions_per_path[path_in_repo] > 0: - warnings.warn( - "About to update multiple times the same file in the same commit:" - f" '{path_in_repo}'. This can cause undesired inconsistencies in" - " your repo." - ) - nb_additions_per_path[path_in_repo] += 1 - for parent in PurePosixPath(path_in_repo).parents: - # Also keep track of number of updated files per folder - # => warns if deleting a folder overwrite some contained files - nb_additions_per_path[str(parent)] += 1 - if isinstance(operation, CommitOperationDelete): - if nb_additions_per_path[str(PurePosixPath(path_in_repo))] > 0: - if operation.is_folder: - warnings.warn( - "About to delete a folder containing files that have just been" - f" updated within the same commit: '{path_in_repo}'. This can" - " cause undesired inconsistencies in your repo." - ) - else: - warnings.warn( - "About to delete a file that have just been updated within the" - f" same commit: '{path_in_repo}'. This can cause undesired" - " inconsistencies in your repo." - ) - - -@validate_hf_hub_args -def upload_lfs_files( - *, - additions: List[CommitOperationAdd], - repo_type: str, - repo_id: str, - token: Optional[str], - endpoint: Optional[str] = None, - num_threads: int = 5, -): - """ - Uploads the content of `additions` to the Hub using the large file storage protocol. - - Relevant external documentation: - - LFS Batch API: https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md - - Args: - additions (`List` of `CommitOperationAdd`): - The files to be uploaded - repo_type (`str`): - Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`. - repo_id (`str`): - A namespace (user or an organization) and a repo name separated - by a `/`. - token (`str`, *optional*): - An authentication token ( See https://huggingface.co/settings/tokens ) - num_threads (`int`, *optional*): - The number of concurrent threads to use when uploading. Defaults to 5. - - - Raises: `RuntimeError` if an upload failed for any reason - - Raises: `ValueError` if the server returns malformed responses - - Raises: `requests.HTTPError` if the LFS batch endpoint returned an HTTP - error - - """ - # Step 1: retrieve upload instructions from the LFS batch endpoint. - # Upload instructions are retrieved by chunk of 256 files to avoid reaching - # the payload limit. 
- batch_actions: List[Dict] = [] - for chunk in chunk_iterable(additions, chunk_size=256): - batch_actions_chunk, batch_errors_chunk = post_lfs_batch_info( - upload_infos=[op.upload_info for op in chunk], - token=token, - repo_id=repo_id, - repo_type=repo_type, - endpoint=endpoint, - ) - - # If at least 1 error, we do not retrieve information for other chunks - if batch_errors_chunk: - message = "\n".join( - [ - f'Encountered error for file with OID {err.get("oid")}: `{err.get("error", {}).get("message")}' - for err in batch_errors_chunk - ] - ) - raise ValueError(f"LFS batch endpoint returned errors:\n{message}") - - batch_actions += batch_actions_chunk - oid2addop = {add_op.upload_info.sha256.hex(): add_op for add_op in additions} - - # Step 2: ignore files that have already been uploaded - filtered_actions = [] - for action in batch_actions: - if action.get("actions") is None: - logger.debug( - f"Content of file {oid2addop[action['oid']].path_in_repo} is already" - " present upstream - skipping upload." - ) - else: - filtered_actions.append(action) - - if len(filtered_actions) == 0: - logger.debug("No LFS files to upload.") - return - - # Step 3: upload files concurrently according to these instructions - def _wrapped_lfs_upload(batch_action) -> None: - try: - operation = oid2addop[batch_action["oid"]] - lfs_upload(operation=operation, lfs_batch_action=batch_action, token=token) - except Exception as exc: - raise RuntimeError(f"Error while uploading '{operation.path_in_repo}' to the Hub.") from exc - - if HF_HUB_ENABLE_HF_TRANSFER: - logger.debug(f"Uploading {len(filtered_actions)} LFS files to the Hub using `hf_transfer`.") - for action in hf_tqdm(filtered_actions): - _wrapped_lfs_upload(action) - elif len(filtered_actions) == 1: - logger.debug("Uploading 1 LFS file to the Hub") - _wrapped_lfs_upload(filtered_actions[0]) - else: - logger.debug( - f"Uploading {len(filtered_actions)} LFS files to the Hub using up to {num_threads} threads concurrently" - ) - thread_map( - _wrapped_lfs_upload, - filtered_actions, - desc=f"Upload {len(filtered_actions)} LFS files", - max_workers=num_threads, - tqdm_class=hf_tqdm, - ) - - -def _validate_preupload_info(preupload_info: dict): - files = preupload_info.get("files") - if not isinstance(files, list): - raise ValueError("preupload_info is improperly formatted") - for file_info in files: - if not ( - isinstance(file_info, dict) - and isinstance(file_info.get("path"), str) - and isinstance(file_info.get("uploadMode"), str) - and (file_info["uploadMode"] in ("lfs", "regular")) - ): - raise ValueError("preupload_info is improperly formatted:") - return preupload_info - - -@validate_hf_hub_args -def fetch_upload_modes( - additions: Iterable[CommitOperationAdd], - repo_type: str, - repo_id: str, - token: Optional[str], - revision: str, - endpoint: Optional[str] = None, - create_pr: bool = False, -) -> Dict[str, UploadMode]: - """ - Requests the Hub "preupload" endpoint to determine whether each input file - should be uploaded as a regular git blob or as git LFS blob. - - Args: - additions (`Iterable` of :class:`CommitOperationAdd`): - Iterable of :class:`CommitOperationAdd` describing the files to - upload to the Hub. - repo_type (`str`): - Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`. - repo_id (`str`): - A namespace (user or an organization) and a repo name separated - by a `/`. 
- token (`str`, *optional*): - An authentication token ( See https://huggingface.co/settings/tokens ) - revision (`str`): - The git revision to upload the files to. Can be any valid git revision. - - Returns: `Dict[str, UploadMode]` - Key is the file path, value is the upload mode ("regular" or "lfs"). - - Raises: - [`~utils.HfHubHTTPError`] - If the Hub API returned an error. - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - If the Hub API response is improperly formatted. - """ - endpoint = endpoint if endpoint is not None else ENDPOINT - headers = build_hf_headers(token=token) - - # Fetch upload mode (LFS or regular) chunk by chunk. - upload_modes: Dict[str, UploadMode] = {} - for chunk in chunk_iterable(additions, 256): - payload = { - "files": [ - { - "path": op.path_in_repo, - "sample": base64.b64encode(op.upload_info.sample).decode("ascii"), - "size": op.upload_info.size, - "sha": op.upload_info.sha256.hex(), - } - for op in chunk - ] - } - - resp = get_session().post( - f"{endpoint}/api/{repo_type}s/{repo_id}/preupload/{revision}", - json=payload, - headers=headers, - params={"create_pr": "1"} if create_pr else None, - ) - hf_raise_for_status(resp) - preupload_info = _validate_preupload_info(resp.json()) - upload_modes.update(**{file["path"]: file["uploadMode"] for file in preupload_info["files"]}) - - # Empty files cannot be uploaded as LFS (S3 would fail with a 501 Not Implemented) - # => empty files are uploaded as "regular" to still allow users to commit them. - for addition in additions: - if addition.upload_info.size == 0: - path = addition.path_in_repo - upload_modes[path] = "regular" - - return upload_modes - - -@validate_hf_hub_args -def fetch_lfs_files_to_copy( - copies: Iterable[CommitOperationCopy], - repo_type: str, - repo_id: str, - token: Optional[str], - revision: str, - endpoint: Optional[str] = None, -) -> Dict[Tuple[str, Optional[str]], "RepoFile"]: - """ - Requests the Hub files information of the LFS files to be copied, including their sha256. - - Args: - copies (`Iterable` of :class:`CommitOperationCopy`): - Iterable of :class:`CommitOperationCopy` describing the files to - copy on the Hub. - repo_type (`str`): - Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`. - repo_id (`str`): - A namespace (user or an organization) and a repo name separated - by a `/`. - token (`str`, *optional*): - An authentication token ( See https://huggingface.co/settings/tokens ) - revision (`str`): - The git revision to upload the files to. Can be any valid git revision. - - Returns: `Dict[Tuple[str, Optional[str]], RepoFile]]` - Key is the file path and revision of the file to copy, value is the repo file. - - Raises: - [`~utils.HfHubHTTPError`] - If the Hub API returned an error. - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - If the Hub API response is improperly formatted. 
- """ - from .hf_api import HfApi - - hf_api = HfApi(endpoint=endpoint, token=token) - files_to_copy = {} - for src_revision, operations in groupby(copies, key=lambda op: op.src_revision): - operations = list(operations) # type: ignore - paths = [op.src_path_in_repo for op in operations] - for offset in range(0, len(paths), FETCH_LFS_BATCH_SIZE): - src_repo_files = hf_api.list_files_info( - repo_id=repo_id, - paths=paths[offset : offset + FETCH_LFS_BATCH_SIZE], - revision=src_revision or revision, - repo_type=repo_type, - ) - for src_repo_file in src_repo_files: - if not src_repo_file.lfs: - raise NotImplementedError("Copying a non-LFS file is not implemented") - files_to_copy[(src_repo_file.rfilename, src_revision)] = src_repo_file - for operation in operations: - if (operation.src_path_in_repo, src_revision) not in files_to_copy: - raise EntryNotFoundError( - f"Cannot copy {operation.src_path_in_repo} at revision " - f"{src_revision or revision}: file is missing on repo." - ) - return files_to_copy - - -def prepare_commit_payload( - operations: Iterable[CommitOperation], - upload_modes: Dict[str, UploadMode], - files_to_copy: Dict[Tuple[str, Optional[str]], "RepoFile"], - commit_message: str, - commit_description: Optional[str] = None, - parent_commit: Optional[str] = None, -) -> Iterable[Dict[str, Any]]: - """ - Builds the payload to POST to the `/commit` API of the Hub. - - Payload is returned as an iterator so that it can be streamed as a ndjson in the - POST request. - - For more information, see: - - https://github.com/huggingface/huggingface_hub/issues/1085#issuecomment-1265208073 - - http://ndjson.org/ - """ - commit_description = commit_description if commit_description is not None else "" - - # 1. Send a header item with the commit metadata - header_value = {"summary": commit_message, "description": commit_description} - if parent_commit is not None: - header_value["parentCommit"] = parent_commit - yield {"key": "header", "value": header_value} - - # 2. Send operations, one per line - for operation in operations: - # 2.a. Case adding a regular file - if isinstance(operation, CommitOperationAdd) and upload_modes.get(operation.path_in_repo) == "regular": - yield { - "key": "file", - "value": { - "content": operation.b64content().decode(), - "path": operation.path_in_repo, - "encoding": "base64", - }, - } - # 2.b. Case adding an LFS file - elif isinstance(operation, CommitOperationAdd) and upload_modes.get(operation.path_in_repo) == "lfs": - yield { - "key": "lfsFile", - "value": { - "path": operation.path_in_repo, - "algo": "sha256", - "oid": operation.upload_info.sha256.hex(), - "size": operation.upload_info.size, - }, - } - # 2.c. Case deleting a file or folder - elif isinstance(operation, CommitOperationDelete): - yield { - "key": "deletedFolder" if operation.is_folder else "deletedFile", - "value": {"path": operation.path_in_repo}, - } - # 2.d. Case copying a file or folder - elif isinstance(operation, CommitOperationCopy): - file_to_copy = files_to_copy[(operation.src_path_in_repo, operation.src_revision)] - if not file_to_copy.lfs: - raise NotImplementedError("Copying a non-LFS file is not implemented") - yield { - "key": "lfsFile", - "value": { - "path": operation.path_in_repo, - "algo": "sha256", - "oid": file_to_copy.lfs["sha256"], - }, - } - # 2.e. Never expected to happen - else: - raise ValueError( - f"Unknown operation to commit. Operation: {operation}. 
Upload mode:" - f" {upload_modes.get(operation.path_in_repo)}" - ) diff --git a/spaces/Daniil-plotnikov/Daniil-plotnikov-russian-vision-v4/app.py b/spaces/Daniil-plotnikov/Daniil-plotnikov-russian-vision-v4/app.py deleted file mode 100644 index 5c1e7c8a9ed4166dc8c303f6d77ec5ecfa6354f1..0000000000000000000000000000000000000000 --- a/spaces/Daniil-plotnikov/Daniil-plotnikov-russian-vision-v4/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Daniil-plotnikov/russian-vision-v4").launch() \ No newline at end of file diff --git a/spaces/Datasculptor/LoRA-DreamBooth-Training-UI/trainer.py b/spaces/Datasculptor/LoRA-DreamBooth-Training-UI/trainer.py deleted file mode 100644 index e4e4469796a08b797ae70a641c2f5125dbd22c1e..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/LoRA-DreamBooth-Training-UI/trainer.py +++ /dev/null @@ -1,166 +0,0 @@ -from __future__ import annotations - -import datetime -import os -import pathlib -import shlex -import shutil -import subprocess - -import gradio as gr -import PIL.Image -import slugify -import torch -from huggingface_hub import HfApi - -from app_upload import LoRAModelUploader -from utils import save_model_card - -URL_TO_JOIN_LORA_LIBRARY_ORG = 'https://huggingface.co/organizations/lora-library/share/hjetHAcKjnPHXhHfbeEcqnBqmhgilFfpOL' - - -def pad_image(image: PIL.Image.Image) -> PIL.Image.Image: - w, h = image.size - if w == h: - return image - elif w > h: - new_image = PIL.Image.new(image.mode, (w, w), (0, 0, 0)) - new_image.paste(image, (0, (w - h) // 2)) - return new_image - else: - new_image = PIL.Image.new(image.mode, (h, h), (0, 0, 0)) - new_image.paste(image, ((h - w) // 2, 0)) - return new_image - - -class Trainer: - def __init__(self, hf_token: str | None = None): - self.hf_token = hf_token - self.api = HfApi(token=hf_token) - self.model_uploader = LoRAModelUploader(hf_token) - - def prepare_dataset(self, instance_images: list, resolution: int, - instance_data_dir: pathlib.Path) -> None: - shutil.rmtree(instance_data_dir, ignore_errors=True) - instance_data_dir.mkdir(parents=True) - for i, temp_path in enumerate(instance_images): - image = PIL.Image.open(temp_path.name) - image = pad_image(image) - image = image.resize((resolution, resolution)) - image = image.convert('RGB') - out_path = instance_data_dir / f'{i:03d}.jpg' - image.save(out_path, format='JPEG', quality=100) - - def join_lora_library_org(self) -> None: - subprocess.run( - shlex.split( - f'curl -X POST -H "Authorization: Bearer {self.hf_token}" -H "Content-Type: application/json" {URL_TO_JOIN_LORA_LIBRARY_ORG}' - )) - - def run( - self, - instance_images: list | None, - instance_prompt: str, - output_model_name: str, - overwrite_existing_model: bool, - validation_prompt: str, - base_model: str, - resolution_s: str, - n_steps: int, - learning_rate: float, - gradient_accumulation: int, - seed: int, - fp16: bool, - use_8bit_adam: bool, - checkpointing_steps: int, - use_wandb: bool, - validation_epochs: int, - upload_to_hub: bool, - use_private_repo: bool, - delete_existing_repo: bool, - upload_to: str, - remove_gpu_after_training: bool, - ) -> str: - if not torch.cuda.is_available(): - raise gr.Error('CUDA is not available.') - if instance_images is None: - raise gr.Error('You need to upload images.') - if not instance_prompt: - raise gr.Error('The instance prompt is missing.') - if not validation_prompt: - raise gr.Error('The validation prompt is missing.') - - resolution = int(resolution_s) - - if not output_model_name: - timestamp = 
datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') - output_model_name = f'lora-dreambooth-{timestamp}' - output_model_name = slugify.slugify(output_model_name) - - repo_dir = pathlib.Path(__file__).parent - output_dir = repo_dir / 'experiments' / output_model_name - if overwrite_existing_model or upload_to_hub: - shutil.rmtree(output_dir, ignore_errors=True) - output_dir.mkdir(parents=True) - - instance_data_dir = repo_dir / 'training_data' / output_model_name - self.prepare_dataset(instance_images, resolution, instance_data_dir) - - if upload_to_hub: - self.join_lora_library_org() - - command = f''' - accelerate launch train_dreambooth_lora.py \ - --pretrained_model_name_or_path={base_model} \ - --instance_data_dir={instance_data_dir} \ - --output_dir={output_dir} \ - --instance_prompt="{instance_prompt}" \ - --resolution={resolution} \ - --train_batch_size=1 \ - --gradient_accumulation_steps={gradient_accumulation} \ - --learning_rate={learning_rate} \ - --lr_scheduler=constant \ - --lr_warmup_steps=0 \ - --max_train_steps={n_steps} \ - --checkpointing_steps={checkpointing_steps} \ - --validation_prompt="{validation_prompt}" \ - --validation_epochs={validation_epochs} \ - --seed={seed} - ''' - if fp16: - command += ' --mixed_precision fp16' - if use_8bit_adam: - command += ' --use_8bit_adam' - if use_wandb: - command += ' --report_to wandb' - - with open(output_dir / 'train.sh', 'w') as f: - command_s = ' '.join(command.split()) - f.write(command_s) - subprocess.run(shlex.split(command)) - save_model_card(save_dir=output_dir, - base_model=base_model, - instance_prompt=instance_prompt, - test_prompt=validation_prompt, - test_image_dir='test_images') - - message = 'Training completed!' - print(message) - - if upload_to_hub: - upload_message = self.model_uploader.upload_lora_model( - folder_path=output_dir.as_posix(), - repo_name=output_model_name, - upload_to=upload_to, - private=use_private_repo, - delete_existing_repo=delete_existing_repo) - print(upload_message) - message = message + '\n' + upload_message - - if remove_gpu_after_training: - space_id = os.getenv('SPACE_ID') - if space_id: - self.api.request_space_hardware(repo_id=space_id, - hardware='cpu-basic') - - return message diff --git a/spaces/Datasculptor/StyleGAN-NADA/e4e/models/latent_codes_pool.py b/spaces/Datasculptor/StyleGAN-NADA/e4e/models/latent_codes_pool.py deleted file mode 100644 index 0281d4b5e80f8eb26e824fa35b4f908dcb6634e6..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/StyleGAN-NADA/e4e/models/latent_codes_pool.py +++ /dev/null @@ -1,55 +0,0 @@ -import random -import torch - - -class LatentCodesPool: - """This class implements latent codes buffer that stores previously generated w latent codes. - This buffer enables us to update discriminators using a history of generated w's - rather than the ones produced by the latest encoder. - """ - - def __init__(self, pool_size): - """Initialize the ImagePool class - Parameters: - pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created - """ - self.pool_size = pool_size - if self.pool_size > 0: # create an empty pool - self.num_ws = 0 - self.ws = [] - - def query(self, ws): - """Return w's from the pool. - Parameters: - ws: the latest generated w's from the generator - Returns w's from the buffer. - By 50/100, the buffer will return input w's. - By 50/100, the buffer will return w's previously stored in the buffer, - and insert the current w's to the buffer. 
- """ - if self.pool_size == 0: # if the buffer size is 0, do nothing - return ws - return_ws = [] - for w in ws: # ws.shape: (batch, 512) or (batch, n_latent, 512) - # w = torch.unsqueeze(image.data, 0) - if w.ndim == 2: - i = random.randint(0, len(w) - 1) # apply a random latent index as a candidate - w = w[i] - self.handle_w(w, return_ws) - return_ws = torch.stack(return_ws, 0) # collect all the images and return - return return_ws - - def handle_w(self, w, return_ws): - if self.num_ws < self.pool_size: # if the buffer is not full; keep inserting current codes to the buffer - self.num_ws = self.num_ws + 1 - self.ws.append(w) - return_ws.append(w) - else: - p = random.uniform(0, 1) - if p > 0.5: # by 50% chance, the buffer will return a previously stored latent code, and insert the current code into the buffer - random_id = random.randint(0, self.pool_size - 1) # randint is inclusive - tmp = self.ws[random_id].clone() - self.ws[random_id] = w - return_ws.append(tmp) - else: # by another 50% chance, the buffer will return the current image - return_ws.append(w) diff --git a/spaces/Datasculptor/StyleGAN-NADA/e4e/models/stylegan2/op/fused_bias_act.cpp b/spaces/Datasculptor/StyleGAN-NADA/e4e/models/stylegan2/op/fused_bias_act.cpp deleted file mode 100644 index 02be898f970bcc8ea297867fcaa4e71b24b3d949..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/StyleGAN-NADA/e4e/models/stylegan2/op/fused_bias_act.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include - - -torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, - int act, int grad, float alpha, float scale); - -#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) - -torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, - int act, int grad, float alpha, float scale) { - CHECK_CUDA(input); - CHECK_CUDA(bias); - - return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); -} \ No newline at end of file diff --git a/spaces/EcoCy/LoRA-DreamBooth-Training-UI/README.md b/spaces/EcoCy/LoRA-DreamBooth-Training-UI/README.md deleted file mode 100644 index 7f46fe50fbc5bdafcff9d3ad1e510337918dd53f..0000000000000000000000000000000000000000 --- a/spaces/EcoCy/LoRA-DreamBooth-Training-UI/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: LoRA DreamBooth Training UI -emoji: ⚡ -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.16.2 -python_version: 3.10.9 -app_file: app.py -pinned: false -license: mit -duplicated_from: SuSung-boy/LoRA-DreamBooth-Training-UI ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/EleutherAI/magma/magma/train_loop.py b/spaces/EleutherAI/magma/magma/train_loop.py deleted file mode 100644 index ea2c7c6d37dee159b3eb225408b1eb7a77a7758b..0000000000000000000000000000000000000000 --- a/spaces/EleutherAI/magma/magma/train_loop.py +++ /dev/null @@ -1,98 +0,0 @@ -import torch -from tqdm import tqdm -from .utils import reduce_losses, to_cuda_half -from torchvision.utils import make_grid - - -def train_step(config, train_loader, model_engine): - losses = [] - - for _ in range(config.gradient_accumulation_steps): - images, captions 
= next(train_loader) - images, captions = images.half().cuda(), captions.cuda() - if config.run_blind: - images = torch.zeros_like(images) - outputs = model_engine(images, captions) - loss = outputs.loss - losses.append(loss) - model_engine.backward(loss) - model_engine.step() - - return reduce_losses(torch.mean(torch.stack(losses))).item() - - -def train_step_classification(config, train_loader, model_engine, return_accuracy=True): - losses = [] - if return_accuracy: - accuracies = [] - for _ in range(config.gradient_accumulation_steps): - images, captions, class_labels = next(train_loader) - images, captions, class_labels = to_cuda_half(images, captions, class_labels) - if config.run_blind: - images = torch.zeros_like(images) - loss, logits = model_engine(images, captions, class_labels) - losses.append(loss) - if return_accuracy: - argmax_pred = logits.argmax(dim=-1) - accuracies.append((argmax_pred == class_labels).float().mean()) - model_engine.backward(loss) - model_engine.step() - - loss_reduced = reduce_losses(torch.mean(torch.stack(losses))).item() - if return_accuracy: - accuracy_reduced = reduce_losses(torch.mean(torch.stack(accuracies))).item() - return loss_reduced, accuracy_reduced - return loss_reduced - - -def eval_step(config, eval_loader, model_engine): - losses = [] - - for i in tqdm(range(config.eval_steps), "evaluating..."): - images, captions = next(eval_loader) - images, captions = images.half().cuda(), captions.cuda() - if config.run_blind: - images = torch.zeros_like(images) - outputs = model_engine(images, captions) - loss = outputs.loss - losses.append(loss) - - return reduce_losses(torch.mean(torch.stack(losses))).item() - - -def eval_step_classification(config, train_loader, model_engine, return_accuracy=True): - losses = [] - if return_accuracy: - accuracies = [] - for _ in range(config.gradient_accumulation_steps): - images, captions, class_labels = next(train_loader) - images, captions, class_labels = to_cuda_half(images, captions, class_labels) - if config.run_blind: - images = torch.zeros_like(images) - loss, logits = model_engine(images, captions, class_labels) - losses.append(loss) - if return_accuracy: - argmax_pred = logits.argmax(dim=-1) - accuracies.append((argmax_pred == class_labels).float().mean()) - - loss_reduced = reduce_losses(torch.mean(torch.stack(losses))).item() - if return_accuracy: - accuracy_reduced = reduce_losses(torch.mean(torch.stack(accuracies))).item() - return loss_reduced, accuracy_reduced - return loss_reduced - - -def inference_step(config, eval_loader, model_engine): - images, _ = next(eval_loader) - images = images.half().cuda() - if config.run_blind: - images = torch.zeros_like(images) - captions = model_engine( - images, captions=None, inference=True - ) # [caption1, caption2, ... 
b] - width = min(2, images.shape[0]) - image_grid = make_grid(images[:width]) - caption = "" - for i in range(width): - caption += f"Caption {i}: \n{captions[i]}\n" - return image_grid, caption diff --git a/spaces/EmbeddedAndrew/examin8/README.md b/spaces/EmbeddedAndrew/examin8/README.md deleted file mode 100644 index d1b1022a0206b35ac775c18e8612b03f6fbd7e74..0000000000000000000000000000000000000000 --- a/spaces/EmbeddedAndrew/examin8/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Examinate -emoji: 🌖 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Enterprisium/Easy_GUI/utils.py b/spaces/Enterprisium/Easy_GUI/utils.py deleted file mode 100644 index 62be8d03a8e8b839f8747310ef0ec0e82fb8ff0a..0000000000000000000000000000000000000000 --- a/spaces/Enterprisium/Easy_GUI/utils.py +++ /dev/null @@ -1,151 +0,0 @@ -import ffmpeg -import numpy as np - -# import praatio -# import praatio.praat_scripts -import os -import sys - -import random - -import csv - -platform_stft_mapping = { - "linux": "stftpitchshift", - "darwin": "stftpitchshift", - "win32": "stftpitchshift.exe", -} - -stft = platform_stft_mapping.get(sys.platform) -# praatEXE = join('.',os.path.abspath(os.getcwd()) + r"\Praat.exe") - - -def CSVutil(file, rw, type, *args): - if type == "formanting": - if rw == "r": - with open(file) as fileCSVread: - csv_reader = list(csv.reader(fileCSVread)) - return ( - (csv_reader[0][0], csv_reader[0][1], csv_reader[0][2]) - if csv_reader is not None - else (lambda: exec('raise ValueError("No data")'))() - ) - else: - if args: - doformnt = args[0] - else: - doformnt = False - qfr = args[1] if len(args) > 1 else 1.0 - tmb = args[2] if len(args) > 2 else 1.0 - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([doformnt, qfr, tmb]) - elif type == "stop": - stop = args[0] if args else False - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([stop]) - - -def load_audio(file, sr, DoFormant, Quefrency, Timbre): - converted = False - DoFormant, Quefrency, Timbre = CSVutil("csvdb/formanting.csv", "r", "formanting") - try: - # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26 - # This launches a subprocess to decode audio while down-mixing and resampling as necessary. - # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. 
- file = ( - file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) # 防止小白拷路径头尾带了空格和"和回车 - file_formanted = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - - # print(f"dofor={bool(DoFormant)} timbr={Timbre} quef={Quefrency}\n") - - if ( - lambda DoFormant: True - if DoFormant.lower() == "true" - else (False if DoFormant.lower() == "false" else DoFormant) - )(DoFormant): - numerator = round(random.uniform(1, 4), 4) - # os.system(f"stftpitchshift -i {file} -q {Quefrency} -t {Timbre} -o {file_formanted}") - # print('stftpitchshift -i "%s" -p 1.0 --rms -w 128 -v 8 -q %s -t %s -o "%s"' % (file, Quefrency, Timbre, file_formanted)) - - if not file.endswith(".wav"): - if not os.path.isfile(f"{file_formanted}.wav"): - converted = True - # print(f"\nfile = {file}\n") - # print(f"\nfile_formanted = {file_formanted}\n") - converting = ( - ffmpeg.input(file_formanted, threads=0) - .output(f"{file_formanted}.wav") - .run( - cmd=["ffmpeg", "-nostdin"], - capture_stdout=True, - capture_stderr=True, - ) - ) - else: - pass - - file_formanted = ( - f"{file_formanted}.wav" - if not file_formanted.endswith(".wav") - else file_formanted - ) - - print(f" · Formanting {file_formanted}...\n") - - os.system( - '%s -i "%s" -q "%s" -t "%s" -o "%sFORMANTED_%s.wav"' - % ( - stft, - file_formanted, - Quefrency, - Timbre, - file_formanted, - str(numerator), - ) - ) - - print(f" · Formanted {file_formanted}!\n") - - # filepraat = (os.path.abspath(os.getcwd()) + '\\' + file).replace('/','\\') - # file_formantedpraat = ('"' + os.path.abspath(os.getcwd()) + '/' + 'formanted'.join(file_formanted) + '"').replace('/','\\') - # print("%sFORMANTED_%s.wav" % (file_formanted, str(numerator))) - - out, _ = ( - ffmpeg.input( - "%sFORMANTED_%s.wav" % (file_formanted, str(numerator)), threads=0 - ) - .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) - .run( - cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True - ) - ) - - try: - os.remove("%sFORMANTED_%s.wav" % (file_formanted, str(numerator))) - except Exception: - pass - print("couldn't remove formanted type of file") - - else: - out, _ = ( - ffmpeg.input(file, threads=0) - .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) - .run( - cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True - ) - ) - except Exception as e: - raise RuntimeError(f"Failed to load audio: {e}") - - if converted: - try: - os.remove(file_formanted) - except Exception: - pass - print("couldn't remove converted type of file") - converted = False - - return np.frombuffer(out, np.float32).flatten() diff --git a/spaces/EronSamez/RVC_HFmeu/infer/modules/train/extract/extract_f0_rmvpe_dml.py b/spaces/EronSamez/RVC_HFmeu/infer/modules/train/extract/extract_f0_rmvpe_dml.py deleted file mode 100644 index 6abb1898550664ca600cebbb6d37ba0de8a3d312..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/infer/modules/train/extract/extract_f0_rmvpe_dml.py +++ /dev/null @@ -1,139 +0,0 @@ -import os -import sys -import traceback - -import parselmouth - -now_dir = os.getcwd() -sys.path.append(now_dir) -import logging - -import numpy as np -import pyworld - -from infer.lib.audio import load_audio - -logging.getLogger("numba").setLevel(logging.WARNING) - -exp_dir = sys.argv[1] -import torch_directml - -device = torch_directml.device(torch_directml.default_device()) -f = open("%s/extract_f0_feature.log" % exp_dir, "a+") - - -def printt(strr): - print(strr) - f.write("%s\n" % strr) - f.flush() - - -class FeatureInput(object): - def 
__init__(self, samplerate=16000, hop_size=160): - self.fs = samplerate - self.hop = hop_size - - self.f0_bin = 256 - self.f0_max = 1100.0 - self.f0_min = 50.0 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - - def compute_f0(self, path, f0_method): - x = load_audio(path, self.fs) - # p_len = x.shape[0] // self.hop - if f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from infer.lib.rmvpe import RMVPE - - print("Loading rmvpe model") - self.model_rmvpe = RMVPE( - "assets/rmvpe/rmvpe.pt", is_half=False, device=device - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - return f0 - - def coarse_f0(self, f0): - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * ( - self.f0_bin - 2 - ) / (self.f0_mel_max - self.f0_mel_min) + 1 - - # use 0 or 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1 - f0_coarse = np.rint(f0_mel).astype(int) - assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, ( - f0_coarse.max(), - f0_coarse.min(), - ) - return f0_coarse - - def go(self, paths, f0_method): - if len(paths) == 0: - printt("no-f0-todo") - else: - printt("todo-f0-%s" % len(paths)) - n = max(len(paths) // 5, 1) # 每个进程最多打印5条 - for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths): - try: - if idx % n == 0: - printt("f0ing,now-%s,all-%s,-%s" % (idx, len(paths), inp_path)) - if ( - os.path.exists(opt_path1 + ".npy") == True - and os.path.exists(opt_path2 + ".npy") == True - ): - continue - featur_pit = self.compute_f0(inp_path, f0_method) - np.save( - opt_path2, - featur_pit, - allow_pickle=False, - ) # nsf - coarse_pit = self.coarse_f0(featur_pit) - np.save( - opt_path1, - coarse_pit, - allow_pickle=False, - ) # ori - except: - printt("f0fail-%s-%s-%s" % (idx, inp_path, traceback.format_exc())) - - -if __name__ == "__main__": - # exp_dir=r"E:\codes\py39\dataset\mi-test" - # n_p=16 - # f = open("%s/log_extract_f0.log"%exp_dir, "w") - printt(sys.argv) - featureInput = FeatureInput() - paths = [] - inp_root = "%s/1_16k_wavs" % (exp_dir) - opt_root1 = "%s/2a_f0" % (exp_dir) - opt_root2 = "%s/2b-f0nsf" % (exp_dir) - - os.makedirs(opt_root1, exist_ok=True) - os.makedirs(opt_root2, exist_ok=True) - for name in sorted(list(os.listdir(inp_root))): - inp_path = "%s/%s" % (inp_root, name) - if "spec" in inp_path: - continue - opt_path1 = "%s/%s" % (opt_root1, name) - opt_path2 = "%s/%s" % (opt_root2, name) - paths.append([inp_path, opt_path1, opt_path2]) - try: - featureInput.go(paths, "rmvpe") - except: - printt("f0_all_fail-%s" % (traceback.format_exc())) - # ps = [] - # for i in range(n_p): - # p = Process( - # target=featureInput.go, - # args=( - # paths[i::n_p], - # f0method, - # ), - # ) - # ps.append(p) - # p.start() - # for i in range(n_p): - # ps[i].join() diff --git a/spaces/EsoCode/text-generation-webui/extensions/api/streaming_api.py b/spaces/EsoCode/text-generation-webui/extensions/api/streaming_api.py deleted file mode 100644 index e54406639500b71a31b2cf4fa7903d4c55d7168e..0000000000000000000000000000000000000000 --- a/spaces/EsoCode/text-generation-webui/extensions/api/streaming_api.py +++ /dev/null @@ -1,112 +0,0 @@ -import asyncio -import json -from threading import Thread - -from websockets.server import serve - -from extensions.api.util import build_parameters, try_start_cloudflared -from modules import shared -from modules.chat import generate_chat_reply -from modules.text_generation import generate_reply - -PATH = 
'/api/v1/stream' - - -async def _handle_connection(websocket, path): - - if path == '/api/v1/stream': - async for message in websocket: - message = json.loads(message) - - prompt = message['prompt'] - generate_params = build_parameters(message) - stopping_strings = generate_params.pop('stopping_strings') - generate_params['stream'] = True - - generator = generate_reply( - prompt, generate_params, stopping_strings=stopping_strings, is_chat=False) - - # As we stream, only send the new bytes. - skip_index = 0 - message_num = 0 - - for a in generator: - to_send = a[skip_index:] - if to_send is None or chr(0xfffd) in to_send: # partial unicode character, don't send it yet. - continue - - await websocket.send(json.dumps({ - 'event': 'text_stream', - 'message_num': message_num, - 'text': to_send - })) - - await asyncio.sleep(0) - skip_index += len(to_send) - message_num += 1 - - await websocket.send(json.dumps({ - 'event': 'stream_end', - 'message_num': message_num - })) - - elif path == '/api/v1/chat-stream': - async for message in websocket: - body = json.loads(message) - - user_input = body['user_input'] - history = body['history'] - generate_params = build_parameters(body, chat=True) - generate_params['stream'] = True - regenerate = body.get('regenerate', False) - _continue = body.get('_continue', False) - - generator = generate_chat_reply( - user_input, history, generate_params, regenerate=regenerate, _continue=_continue, loading_message=False) - - message_num = 0 - for a in generator: - await websocket.send(json.dumps({ - 'event': 'text_stream', - 'message_num': message_num, - 'history': a - })) - - await asyncio.sleep(0) - message_num += 1 - - await websocket.send(json.dumps({ - 'event': 'stream_end', - 'message_num': message_num - })) - - else: - print(f'Streaming api: unknown path: {path}') - return - - -async def _run(host: str, port: int): - async with serve(_handle_connection, host, port, ping_interval=None): - await asyncio.Future() # run forever - - -def _run_server(port: int, share: bool = False): - address = '0.0.0.0' if shared.args.listen else '127.0.0.1' - - def on_start(public_url: str): - public_url = public_url.replace('https://', 'wss://') - print(f'Starting streaming server at public url {public_url}{PATH}') - - if share: - try: - try_start_cloudflared(port, max_attempts=3, on_start=on_start) - except Exception as e: - print(e) - else: - print(f'Starting streaming server at ws://{address}:{port}{PATH}') - - asyncio.run(_run(host=address, port=port)) - - -def start_server(port: int, share: bool = False): - Thread(target=_run_server, args=[port, share], daemon=True).start() diff --git a/spaces/EyanAn/vits-uma-genshin-honkai/README.md b/spaces/EyanAn/vits-uma-genshin-honkai/README.md deleted file mode 100644 index 1c0aa069bfd980b6b45bb2bf62ff74bd9b0b61c2..0000000000000000000000000000000000000000 --- a/spaces/EyanAn/vits-uma-genshin-honkai/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -license: apache-2.0 -title: ' vits-uma-genshin-honkai' -sdk: gradio -sdk_version: 3.7 -emoji: 🐨 -colorTo: yellow -pinned: false -app_file: app.py -duplicated_from: ikechan8370/vits-uma-genshin-honkai ---- diff --git a/spaces/Flux9665/IMS-Toucan/Layers/LayerNorm.py b/spaces/Flux9665/IMS-Toucan/Layers/LayerNorm.py deleted file mode 100644 index c4cb4c15df0ccc0195bc18e124f4b50fb6bcee80..0000000000000000000000000000000000000000 --- a/spaces/Flux9665/IMS-Toucan/Layers/LayerNorm.py +++ /dev/null @@ -1,36 +0,0 @@ -# Written by Shigeki Karita, 2019 -# Published under Apache 2.0 
(http://www.apache.org/licenses/LICENSE-2.0) -# Adapted by Florian Lux, 2021 - -import torch - - -class LayerNorm(torch.nn.LayerNorm): - """ - Layer normalization module. - - Args: - nout (int): Output dim size. - dim (int): Dimension to be normalized. - """ - - def __init__(self, nout, dim=-1): - """ - Construct an LayerNorm object. - """ - super(LayerNorm, self).__init__(nout, eps=1e-12) - self.dim = dim - - def forward(self, x): - """ - Apply layer normalization. - - Args: - x (torch.Tensor): Input tensor. - - Returns: - torch.Tensor: Normalized tensor. - """ - if self.dim == -1: - return super(LayerNorm, self).forward(x) - return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1) diff --git a/spaces/GIZ/vulnerability_analysis/app.py b/spaces/GIZ/vulnerability_analysis/app.py deleted file mode 100644 index c280f71185b97a2475da08cad8d5b910e86a21b4..0000000000000000000000000000000000000000 --- a/spaces/GIZ/vulnerability_analysis/app.py +++ /dev/null @@ -1,161 +0,0 @@ -import appStore.vulnerability_analysis as vulnerability_analysis -import appStore.doc_processing as processing -from utils.uploadAndExample import add_upload -import streamlit as st -from utils.vulnerability_classifier import label_dict -import pandas as pd -import plotly.express as px - -st.set_page_config(page_title = 'Vulnerability Analysis', - initial_sidebar_state='expanded', layout="wide") - -with st.sidebar: - # upload and example doc - choice = st.sidebar.radio(label = 'Select the Document', - help = 'You can upload the document \ - or else you can try a example document', - options = ('Upload Document', 'Try Example'), - horizontal = True) - add_upload(choice) - -with st.container(): - st.markdown("

      Vulnerability Analysis
      ", unsafe_allow_html=True) - st.write(' ') - -with st.expander("ℹ️ - About this app", expanded=False): - st.write( - """ - The Vulnerability Analysis App is an open-source\ - digital tool which aims to assist policy analysts and \ - other users in extracting and filtering references \ - to different vulnerable groups from public documents. - """) - # st.write('**Definitions**') - - # st.caption(""" - # - **Target**: Targets are an intention to achieve a specific result, \ - # for example, to reduce GHG emissions to a specific level \ - # (a GHG target) or increase energy efficiency or renewable \ - # energy to a specific level (a non-GHG target), typically by \ - # a certain date. - # - **Economy-wide Target**: Certain Target are applicable \ - # not at specific Sector level but are applicable at economic \ - # wide scale. - # - **Netzero**: Identifies if its Netzero Target or not. - # - 'NET-ZERO': target_labels = ['T_Netzero','T_Netzero_C'] - # - 'Non Netzero Target': target_labels_neg = ['T_Economy_C', - # 'T_Economy_Unc','T_Adaptation_C','T_Adaptation_Unc','T_Transport_C', - # 'T_Transport_O_C','T_Transport_O_Unc','T_Transport_Unc'] - # - 'Others': Other Targets beside covered above - # - **GHG Target**: GHG targets refer to contributions framed as targeted \ - # outcomes in GHG terms. - # - 'GHG': target_labels_ghg_yes = ['T_Transport_Unc','T_Transport_C'] - # - 'NON GHG TRANSPORT TARGET': target_labels_ghg_no = ['T_Adaptation_Unc',\ - # 'T_Adaptation_C', 'T_Transport_O_Unc', 'T_Transport_O_C'] - # - 'OTHERS': Other Targets beside covered above. - # - **Conditionality**: An “unconditional contribution” is what countries \ - # could implement without any conditions and based on their own \ - # resources and capabilities. A “conditional contribution” is one \ - # that countries would undertake if international means of support \ - # are provided, or other conditions are met. - # - **Action**: Actions are an intention to implement specific means of \ - # achieving GHG reductions, usually in forms of concrete projects. - # - **Policies and Plans**: Policies are domestic planning documents \ - # such as policies, regulations or guidlines, and Plans are broader \ - # than specific policies or actions, such as a general intention \ - # to ‘improve efficiency’, ‘develop renewable energy’, etc. \ - # The terms come from the World Bank's NDC platform and WRI's publication. - # """) - - #c1, c2, c3 = st.columns([12,1,10]) - #with c1: - # image = Image.open('docStore/img/flow.jpg') - # st.image(image) - #with c3: - st.write(""" - What Happens in background? - - - Step 1: Once the document is provided to app, it undergoes *Pre-processing*.\ - In this step the document is broken into smaller paragraphs \ - (based on word/sentence count). - - Step 2: The paragraphs are then fed to the **Vulnerability Classifier** which detects if - the paragraph contains any references to vulnerable groups. 
- """) - - st.write("") - -# Define the apps used -apps = [processing.app, vulnerability_analysis.app] - -multiplier_val =1/len(apps) -if st.button("Analyze Document"): - prg = st.progress(0.0) - for i,func in enumerate(apps): - func() - prg.progress((i+1)*multiplier_val) - -# If there is data stored -if 'key0' in st.session_state: - with st.sidebar: - topic = st.radio( - "Which category you want to explore?", - (['Vulnerability'])) - - if topic == 'Vulnerability': - - # Assign dataframe a name - df_vul = st.session_state['key0'] - - col1, col2 = st.columns([1,1]) - - with col1: - # Header - st.subheader("Explore references to vulnerable groups:") - - # Text - num_paragraphs = len(df_vul['Vulnerability Label']) - num_references = len(df_vul[df_vul['Vulnerability Label'] != 'Other']) - - st.markdown(f"""
      The document contains a - total of {num_paragraphs} paragraphs. - We identified {num_references} - references to vulnerable groups.
      - In the pie chart on the right you can see the distribution of the different - groups defined. For a more detailed view in the text, see the paragraphs and - their respective labels in the table below.""", unsafe_allow_html=True) - - with col2: - ### Pie chart - - # Create a df that stores all the labels - df_labels = pd.DataFrame(list(label_dict.items()), columns=['Label ID', 'Label']) - - # Count how often each label appears in the "Vulnerability Labels" column - label_counts = df_vul['Vulnerability Label'].value_counts().reset_index() - label_counts.columns = ['Label', 'Count'] - - # Merge the label counts with the df_label DataFrame - df_labels = df_labels.merge(label_counts, on='Label', how='left') - - # Configure graph - fig = px.pie(df_labels, - names="Label", - values="Count", - title='Label Counts', - hover_name="Count", - color_discrete_sequence=px.colors.qualitative.Plotly - ) - - #Show plot - st.plotly_chart(fig, use_container_width=True) - - ### Table - st.table(df_vul[df_vul['Vulnerability Label'] != 'Other']) - - # vulnerability_analysis.vulnerability_display() - # elif topic == 'Action': - # policyaction.action_display() - # else: - # policyaction.policy_display() - #st.write(st.session_state.key0) \ No newline at end of file diff --git a/spaces/GT4SD/patent_generative_transformers/app.py b/spaces/GT4SD/patent_generative_transformers/app.py deleted file mode 100644 index eac8d73284225a2cc83c2879a20f41fcf2951090..0000000000000000000000000000000000000000 --- a/spaces/GT4SD/patent_generative_transformers/app.py +++ /dev/null @@ -1,115 +0,0 @@ -import logging -import pathlib -import gradio as gr -import pandas as pd -from gt4sd.algorithms.generation.pgt import ( - PGT, - PGTCoherenceChecker, - PGTEditor, - PGTGenerator, -) -from gt4sd.algorithms.registry import ApplicationsRegistry - - -logger = logging.getLogger(__name__) -logger.addHandler(logging.NullHandler()) - -MODEL_FN = { - "PGTGenerator": PGTGenerator, - "PGTEditor": PGTEditor, - "PGTCoherenceChecker": PGTCoherenceChecker, -} - - -def run_inference( - model_type: str, - generator_task: str, - editor_task: str, - checker_task: str, - prompt: str, - second_prompt: str, - length: int, - k: int, - p: float, -): - - kwargs = {"max_length": length, "top_k": k, "top_p": p} - - if model_type == "PGTGenerator": - config = PGTGenerator(task=generator_task, input_text=prompt, **kwargs) - elif model_type == "PGTEditor": - config = PGTEditor(input_type=editor_task, input_text=prompt, **kwargs) - elif model_type == "PGTCoherenceChecker": - config = PGTCoherenceChecker( - coherence_type=checker_task, input_a=prompt, input_b=second_prompt, **kwargs - ) - - model = PGT(config) - text = list(model.sample(1))[0] - - return text - - -if __name__ == "__main__": - - # Preparation (retrieve all available algorithms) - all_algos = ApplicationsRegistry.list_available() - algos = [ - x["algorithm_application"] - for x in list(filter(lambda x: "PGT" in x["algorithm_name"], all_algos)) - ] - - # Load metadata - metadata_root = pathlib.Path(__file__).parent.joinpath("model_cards") - - examples = pd.read_csv( - metadata_root.joinpath("examples.csv"), sep="|", header=None - ).fillna("") - print("Examples: ", examples.values.tolist()) - - with open(metadata_root.joinpath("article.md"), "r") as f: - article = f.read() - with open(metadata_root.joinpath("description.md"), "r") as f: - description = f.read() - - gen_tasks = [ - "title-to-abstract", - "abstract-to-title", - "abstract-to-claim", - "claim-to-abstract", - ] - - demo = gr.Interface( - 
fn=run_inference, - title="Patent Generative Transformer", - inputs=[ - gr.Dropdown(algos, label="Model type", value="PGTGenerator"), - gr.Dropdown(gen_tasks, label="Generator task", value="title-to-abstract"), - gr.Dropdown(["abstract", "claim"], label="Editor task", value="abstract"), - gr.Dropdown( - ["title-abstract", "title-claim", "abstract-claim"], - label="Checker task", - value="title-abstract", - ), - gr.Textbox( - label="Primary Text prompt", - placeholder="Artificial intelligence and machine learning infrastructure", - lines=5, - ), - gr.Textbox( - label="Secondary text prompt (only coherence checker)", - placeholder="", - lines=1, - ), - gr.Slider( - minimum=5, maximum=1024, value=512, label="Maximal length", step=1 - ), - gr.Slider(minimum=2, maximum=500, value=50, label="Top-k", step=1), - gr.Slider(minimum=0.5, maximum=1.0, value=0.95, label="Top-p"), - ], - outputs=gr.Textbox(label="Output"), - article=article, - description=description, - examples=examples.values.tolist(), - ) - demo.launch(debug=True, show_error=True) diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/align_pair_colored_blocks_along_line.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/align_pair_colored_blocks_along_line.py deleted file mode 100644 index f5b41a6351110bb7bfaad95093f1aa46ed146778..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/align_pair_colored_blocks_along_line.py +++ /dev/null @@ -1,47 +0,0 @@ -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils - -class AlignPairColoredBlocksAlongLine(Task): - """Align two pairs of blocks, each pair painted a different color (red and blue), along a marked line on the tabletop.""" - - def __init__(self): - super().__init__() - self.max_steps = 10 - self.lang_template = "Place two pairs of blocks, each pair painted a different color (red and blue), along a marked line on the tabletop." - self.task_completed_desc = "done aligning blocks." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add line. - line_size = (0.3, 0.01, 0.01) - line_pose = self.get_random_pose(env, line_size) - line_template = 'line/line-template.urdf' - replace = {'DIM': line_size} - line_urdf = self.fill_template(line_template, replace) - env.add_object(line_urdf, line_pose, 'fixed') - - # Add blocks. - block_size = (0.04, 0.04, 0.04) - block_template = 'block/block-template.urdf' - colors = [utils.COLORS['red'], utils.COLORS['blue']] - blocks = [] - anchor_base_poses = [(utils.apply(line_pose, (0.04, 0, 0.001)), line_pose[1]), - (utils.apply(line_pose, (0.04 * 2, 0, 0.001)), line_pose[1]), - (utils.apply(line_pose, (-0.04, 0, 0.041)), line_pose[1]), - (utils.apply(line_pose, (-0.04 * 2, 0, 0.041)), line_pose[1])] - - for color in colors: - for _ in range(2): - block_pose = self.get_random_pose(env, block_size) - replace = {'DIM': block_size} - block_urdf = self.fill_template(block_template, replace) - block_id = env.add_object(block_urdf, block_pose, color=color) - blocks.append(block_id) - - # Goal: each pair of similarly colored blocks are touching and both pairs are aligned along the line. 
- self.add_goal(objs=blocks, matches=np.ones((4, 4)), targ_poses=anchor_base_poses, replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1, - language_goal=self.lang_template) \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/models/clip_film_lingunet_lat.py b/spaces/Gen-Sim/Gen-Sim/cliport/models/clip_film_lingunet_lat.py deleted file mode 100644 index 32a8ee64326d9bc33e0e2890de514dc3af266196..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/models/clip_film_lingunet_lat.py +++ /dev/null @@ -1,116 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - -import cliport.utils.utils as utils -from cliport.models.resnet import IdentityBlock, ConvBlock -from cliport.models.core.unet import Up -from cliport.models.clip_lingunet_lat import CLIPLingUNetLat - -from cliport.models.core import fusion -from cliport.models.core.fusion import FusionConvLat - - -class CLIPFilmLingUNet(CLIPLingUNetLat): - """ CLIP RN50 with U-Net skip connections """ - - def __init__(self, input_shape, output_dim, cfg, device, preprocess): - super().__init__(input_shape, output_dim, cfg, device, preprocess) - - def _build_decoder(self): - # language - self.lang_fusion_type = 'film' - - self.lang_fuser1 = fusion.names[self.lang_fusion_type](input_dim=self.input_dim // 2) - self.lang_fuser2 = fusion.names[self.lang_fusion_type](input_dim=self.input_dim // 4) - self.lang_fuser3 = fusion.names[self.lang_fusion_type](input_dim=self.input_dim // 8) - - self.proj_input_dim = 1024 - - self.lang_gamma1 = nn.Linear(self.proj_input_dim, 1024) - self.lang_gamma2 = nn.Linear(self.proj_input_dim, 512) - self.lang_gamma3 = nn.Linear(self.proj_input_dim, 256) - - self.lang_beta1 = nn.Linear(self.proj_input_dim, 1024) - self.lang_beta2 = nn.Linear(self.proj_input_dim, 512) - self.lang_beta3 = nn.Linear(self.proj_input_dim, 256) - - # vision - self.conv1 = nn.Sequential( - nn.Conv2d(self.input_dim, 1024, kernel_size=3, stride=1, padding=1, bias=False), - nn.ReLU(True) - ) - self.up1 = Up(2048, 1024 // self.up_factor, self.bilinear) - self.lat_fusion1 = FusionConvLat(input_dim=1024+512, output_dim=512) - - self.up2 = Up(1024, 512 // self.up_factor, self.bilinear) - self.lat_fusion2 = FusionConvLat(input_dim=512+256, output_dim=256) - - self.up3 = Up(512, 256 // self.up_factor, self.bilinear) - self.lat_fusion3 = FusionConvLat(input_dim=256+128, output_dim=128) - - self.layer1 = nn.Sequential( - ConvBlock(128, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm), - IdentityBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm), - nn.UpsamplingBilinear2d(scale_factor=2), - ) - self.lat_fusion4 = FusionConvLat(input_dim=128+64, output_dim=64) - - self.layer2 = nn.Sequential( - ConvBlock(64, [32, 32, 32], kernel_size=3, stride=1, batchnorm=self.batchnorm), - IdentityBlock(32, [32, 32, 32], kernel_size=3, stride=1, batchnorm=self.batchnorm), - nn.UpsamplingBilinear2d(scale_factor=2), - ) - self.lat_fusion5 = FusionConvLat(input_dim=64+32, output_dim=32) - - self.layer3 = nn.Sequential( - ConvBlock(32, [16, 16, 16], kernel_size=3, stride=1, batchnorm=self.batchnorm), - IdentityBlock(16, [16, 16, 16], kernel_size=3, stride=1, batchnorm=self.batchnorm), - nn.UpsamplingBilinear2d(scale_factor=2), - ) - self.lat_fusion6 = FusionConvLat(input_dim=32+16, output_dim=16) - - self.conv2 = nn.Sequential( - nn.Conv2d(16, self.output_dim, kernel_size=1) - ) - - def forward(self, x, lat, l): - x = self.preprocess(x, dist='clip') - - in_type = 
x.dtype - in_shape = x.shape - x = x[:,:3] # select RGB - x, im = self.encode_image(x) - x = x.to(in_type) - - l_enc, l_emb, l_mask = self.encode_text(l) - l_input = l_enc - l_input = l_input.to(dtype=x.dtype) - - assert x.shape[1] == self.input_dim - x = self.conv1(x) - - x = self.lang_fuser1(x, l_input, gamma=self.lang_gamma1, beta=self.lang_beta1) - x = self.up1(x, im[-2]) - x = self.lat_fusion1(x, lat[-6]) - - x = self.lang_fuser2(x, l_input, gamma=self.lang_gamma2, beta=self.lang_beta2) - x = self.up2(x, im[-3]) - x = self.lat_fusion2(x, lat[-5]) - - x = self.lang_fuser3(x, l_input, gamma=self.lang_gamma3, beta=self.lang_beta3) - x = self.up3(x, im[-4]) - x = self.lat_fusion3(x, lat[-4]) - - x = self.layer1(x) - x = self.lat_fusion4(x, lat[-3]) - - x = self.layer2(x) - x = self.lat_fusion5(x, lat[-2]) - - x = self.layer3(x) - x = self.lat_fusion6(x, lat[-1]) - - x = self.conv2(x) - - x = F.interpolate(x, size=(in_shape[-2], in_shape[-1]), mode='bilinear') - return x \ No newline at end of file diff --git a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/fma.py b/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/fma.py deleted file mode 100644 index 2eeac58a626c49231e04122b93e321ada954c5d3..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/fma.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`.""" - -import torch - -#---------------------------------------------------------------------------- - -def fma(a, b, c): # => a * b + c - return _FusedMultiplyAdd.apply(a, b, c) - -#---------------------------------------------------------------------------- - -class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c - @staticmethod - def forward(ctx, a, b, c): # pylint: disable=arguments-differ - out = torch.addcmul(c, a, b) - ctx.save_for_backward(a, b) - ctx.c_shape = c.shape - return out - - @staticmethod - def backward(ctx, dout): # pylint: disable=arguments-differ - a, b = ctx.saved_tensors - c_shape = ctx.c_shape - da = None - db = None - dc = None - - if ctx.needs_input_grad[0]: - da = _unbroadcast(dout * b, a.shape) - - if ctx.needs_input_grad[1]: - db = _unbroadcast(dout * a, b.shape) - - if ctx.needs_input_grad[2]: - dc = _unbroadcast(dout, c_shape) - - return da, db, dc - -#---------------------------------------------------------------------------- - -def _unbroadcast(x, shape): - extra_dims = x.ndim - len(shape) - assert extra_dims >= 0 - dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)] - if len(dim): - x = x.sum(dim=dim, keepdim=True) - if extra_dims: - x = x.reshape(-1, *x.shape[extra_dims+1:]) - assert x.shape == shape - return x - -#---------------------------------------------------------------------------- diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/__init__.py b/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git 
a/spaces/Gradio-Blocks/uniformer_image_detection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py deleted file mode 100644 index 72b90f881dc0df51bbf6c562a9f80493ea150ab6..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py +++ /dev/null @@ -1,54 +0,0 @@ -_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' - -model = dict( - pretrained='open-mmlab://detectron2/resnet50_caffe', - backbone=dict( - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True)), - bbox_head=dict( - norm_on_bbox=True, - centerness_on_reg=True, - dcn_on_last_conv=True, - center_sampling=True, - conv_bias=True, - loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), - # training and testing settings - test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6))) - -# dataset settings -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -optimizer_config = dict(_delete_=True, grad_clip=None) - -lr_config = dict(warmup='linear') diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py deleted file mode 100644 index 4be68176d2ed6f9b209823187f1367d204fe67d1..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' -model = dict( - pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/datasets/pascal_context_59.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/datasets/pascal_context_59.py deleted file mode 100644 index 37585abab89834b95cd5bdd993b994fca1db65f6..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/datasets/pascal_context_59.py +++ /dev/null @@ -1,60 +0,0 @@ -# dataset settings -dataset_type = 'PascalContextDataset59' -data_root = 'data/VOCdevkit/VOC2010/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -img_scale = (520, 520) -crop_size = (480, 480) - 
-train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', reduce_zero_label=True), - dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale, - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=4, - workers_per_gpu=4, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClassContext', - split='ImageSets/SegmentationContext/train.txt', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClassContext', - split='ImageSets/SegmentationContext/val.txt', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClassContext', - split='ImageSets/SegmentationContext/val.txt', - pipeline=test_pipeline)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 654f377b6f6152c9bd98d33824a39a41d7510c3f..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 699aa212c3518901b2f84db3f062c16b023c7538..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/emanet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/nonlocal_net/README.md b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/nonlocal_net/README.md deleted file mode 100644 index da0924ac60f0a16a17fe4705e0edbf5aad962a82..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/nonlocal_net/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# Non-local Neural Networks - -## Introduction - - - -```latex -@inproceedings{wang2018non, - title={Non-local neural networks}, - author={Wang, Xiaolong 
and Girshick, Ross and Gupta, Abhinav and He, Kaiming}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={7794--7803}, - year={2018} -} -``` - -## Results and models - -### Cityscapes - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| -------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| NonLocal | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.72 | 78.24 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748-c75e81e3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748.log.json) | -| NonLocal | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.95 | 78.66 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748-d63729fa.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748.log.json) | -| NonLocal | R-50-D8 | 769x769 | 40000 | 8.9 | 1.52 | 78.33 | 79.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243-82ef6749.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243.log.json) | -| NonLocal | R-101-D8 | 769x769 | 40000 | 12.8 | 1.05 | 78.57 | 80.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348-8fe9a9dc.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348.log.json) | -| NonLocal | R-50-D8 | 512x1024 | 80000 | - | - | 78.01 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518-d6839fae.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518.log.json) | -| NonLocal | R-101-D8 | 512x1024 | 80000 | - | - | 78.93 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411-32700183.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411.log.json) | -| NonLocal | R-50-D8 | 769x769 | 80000 | - | - | 79.05 | 80.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506-1f9792f6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506.log.json) | -| NonLocal | R-101-D8 | 769x769 | 80000 | - | - | 79.40 | 80.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428-0e1fa4f9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428.log.json) | - -### ADE20K - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| -------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| NonLocal | R-50-D8 | 512x512 | 80000 | 9.1 | 21.37 | 40.75 | 42.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801-5ae0aa33.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801.log.json) | -| NonLocal | R-101-D8 | 512x512 | 80000 | 12.6 | 13.97 | 42.90 | 44.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758-24105919.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758.log.json) | -| NonLocal | R-50-D8 | 512x512 | 160000 | - | - | 42.03 | 43.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410-baef45e3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410.log.json) | -| NonLocal | R-101-D8 | 512x512 | 160000 | - | - | 43.36 | 44.83 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20200616_003422-affd0f8d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20200616_003422.log.json) | - -### Pascal VOC 2012 + Aug - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| -------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| NonLocal | R-50-D8 | 512x512 | 20000 | 6.4 | 21.21 | 76.20 | 77.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613-07f2a57c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613.log.json) | -| NonLocal | R-101-D8 | 512x512 | 20000 | 9.8 | 14.01 | 78.15 | 78.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615-948c68ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615.log.json) | -| NonLocal | R-50-D8 | 512x512 | 40000 | - | - | 76.65 | 77.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028-0139d4a9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028.log.json) | -| NonLocal | R-101-D8 | 512x512 | 40000 | - | - | 78.27 | 79.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028-7e5ff470.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028.log.json) | diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index a0726c293d6026898110f7fa55d5e7d2d55d7a02..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py deleted file mode 100644 index 1c86eba17c46a863091d999b1a090e1237202ec5..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/models/__init__.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/models/__init__.py deleted file mode 100644 index be6bfe4b787a132aeaabaed1c3437c9ecd5c656c..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/models/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -""" -Models for EnCodec, AudioGen, MusicGen, as well as the generic LMModel. -""" -# flake8: noqa -from . 
import builders, loaders -from .encodec import ( - CompressionModel, EncodecModel, DAC, - HFEncodecModel, HFEncodecCompressionModel) -from .audiogen import AudioGen -from .lm import LMModel -from .multibanddiffusion import MultiBandDiffusion -from .musicgen import MusicGen -from .unet import DiffusionUnet diff --git a/spaces/GrandaddyShmax/MusicGen_Plus/tests/quantization/test_vq.py b/spaces/GrandaddyShmax/MusicGen_Plus/tests/quantization/test_vq.py deleted file mode 100644 index c215099fedacae35c6798fdd9b8420a447aa16bb..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus/tests/quantization/test_vq.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from audiocraft.quantization.vq import ResidualVectorQuantizer - - -class TestResidualVectorQuantizer: - - def test_rvq(self): - x = torch.randn(1, 16, 2048) - vq = ResidualVectorQuantizer(n_q=8, dimension=16, bins=8) - res = vq(x, 1.) - assert res.x.shape == torch.Size([1, 16, 2048]) diff --git a/spaces/GroveStreet/GTA_SOVITS/diffusion/vocoder.py b/spaces/GroveStreet/GTA_SOVITS/diffusion/vocoder.py deleted file mode 100644 index bbaa47f64fd5a3191a24dfaa054c423fa86e5bae..0000000000000000000000000000000000000000 --- a/spaces/GroveStreet/GTA_SOVITS/diffusion/vocoder.py +++ /dev/null @@ -1,94 +0,0 @@ -import torch -from vdecoder.nsf_hifigan.nvSTFT import STFT -from vdecoder.nsf_hifigan.models import load_model,load_config -from torchaudio.transforms import Resample - - -class Vocoder: - def __init__(self, vocoder_type, vocoder_ckpt, device = None): - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = device - - if vocoder_type == 'nsf-hifigan': - self.vocoder = NsfHifiGAN(vocoder_ckpt, device = device) - elif vocoder_type == 'nsf-hifigan-log10': - self.vocoder = NsfHifiGANLog10(vocoder_ckpt, device = device) - else: - raise ValueError(f" [x] Unknown vocoder: {vocoder_type}") - - self.resample_kernel = {} - self.vocoder_sample_rate = self.vocoder.sample_rate() - self.vocoder_hop_size = self.vocoder.hop_size() - self.dimension = self.vocoder.dimension() - - def extract(self, audio, sample_rate, keyshift=0): - - # resample - if sample_rate == self.vocoder_sample_rate: - audio_res = audio - else: - key_str = str(sample_rate) - if key_str not in self.resample_kernel: - self.resample_kernel[key_str] = Resample(sample_rate, self.vocoder_sample_rate, lowpass_filter_width = 128).to(self.device) - audio_res = self.resample_kernel[key_str](audio) - - # extract - mel = self.vocoder.extract(audio_res, keyshift=keyshift) # B, n_frames, bins - return mel - - def infer(self, mel, f0): - f0 = f0[:,:mel.size(1),0] # B, n_frames - audio = self.vocoder(mel, f0) - return audio - - -class NsfHifiGAN(torch.nn.Module): - def __init__(self, model_path, device=None): - super().__init__() - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = device - self.model_path = model_path - self.model = None - self.h = load_config(model_path) - self.stft = STFT( - self.h.sampling_rate, - self.h.num_mels, - self.h.n_fft, - self.h.win_size, - self.h.hop_size, - self.h.fmin, - self.h.fmax) - - def sample_rate(self): - return self.h.sampling_rate - - def hop_size(self): - return self.h.hop_size - - def dimension(self): - return self.h.num_mels - - def extract(self, audio, 
keyshift=0): - mel = self.stft.get_mel(audio, keyshift=keyshift).transpose(1, 2) # B, n_frames, bins - return mel - - def forward(self, mel, f0): - if self.model is None: - print('| Load HifiGAN: ', self.model_path) - self.model, self.h = load_model(self.model_path, device=self.device) - with torch.no_grad(): - c = mel.transpose(1, 2) - audio = self.model(c, f0) - return audio - -class NsfHifiGANLog10(NsfHifiGAN): - def forward(self, mel, f0): - if self.model is None: - print('| Load HifiGAN: ', self.model_path) - self.model, self.h = load_model(self.model_path, device=self.device) - with torch.no_grad(): - c = 0.434294 * mel.transpose(1, 2) - audio = self.model(c, f0) - return audio \ No newline at end of file diff --git a/spaces/GuruVineeth/GenAIGPT/README.md b/spaces/GuruVineeth/GenAIGPT/README.md deleted file mode 100644 index 84016c131474ac751d715f88527f765a835b2c17..0000000000000000000000000000000000000000 --- a/spaces/GuruVineeth/GenAIGPT/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: GenAIGPT -emoji: 🦀 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/README.md b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/README.md deleted file mode 100644 index c3af39e7cd24c8e4c2b6f5f364dca88b901b4849..0000000000000000000000000000000000000000 --- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/README.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Rgbdsod Multimae Streamlit -emoji: 🌖 -colorFrom: pink -colorTo: pink -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -# Run locally (Port 8540 - Domain) -```APP_ENVIRONMENT=LOCAL streamlit run app.py --server.port 8541``` - -# Run WebRTC -```APP_ENVIRONMENT=LOCAL streamlit run webrtc_app.py --server.port 8540``` - -# Environments -- Python 3.9.12 -- Ubuntu 22.04 diff --git a/spaces/HaHaBill/LandShapes-Antarctica/models/biggan/pytorch_biggan/pytorch_pretrained_biggan/__init__.py b/spaces/HaHaBill/LandShapes-Antarctica/models/biggan/pytorch_biggan/pytorch_pretrained_biggan/__init__.py deleted file mode 100644 index b570848421afd921fae635569c97d0f8f5b33c80..0000000000000000000000000000000000000000 --- a/spaces/HaHaBill/LandShapes-Antarctica/models/biggan/pytorch_biggan/pytorch_pretrained_biggan/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .config import BigGANConfig -from .model import BigGAN -from .file_utils import PYTORCH_PRETRAINED_BIGGAN_CACHE, cached_path -from .utils import (truncated_noise_sample, save_as_images, - convert_to_images, display_in_terminal, - one_hot_from_int, one_hot_from_names) diff --git a/spaces/HaloMaster/chinesesummary/fengshen/metric/metric.py b/spaces/HaloMaster/chinesesummary/fengshen/metric/metric.py deleted file mode 100644 index c59c2c06d0c5cb583ba0cb3943da5d8f95308b75..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/metric/metric.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -from collections import Counter -import torch -from torch import nn -# import seqeval - -from .utils_ner import get_entities - - -class metrics_mlm_acc(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, logits, labels, masked_lm_metric): - - # if len(list(logits.shape))==3: - mask_label_size = 0 - 
for i in masked_lm_metric: - for j in i: - if j > 0: - mask_label_size += 1 - - y_pred = torch.argmax(logits, dim=-1) - - y_pred = y_pred.view(size=(-1,)) - y_true = labels.view(size=(-1,)) - masked_lm_metric = masked_lm_metric.view(size=(-1,)) - - corr = torch.eq(y_pred, y_true) - corr = torch.multiply(masked_lm_metric, corr) - - acc = torch.sum(corr.float())/mask_label_size - return acc - - -class SeqEntityScore(object): - def __init__(self, id2label, markup='bios', middle_prefix='I-'): - self.id2label = id2label - self.markup = markup - self.middle_prefix = middle_prefix - self.reset() - - def reset(self): - self.origins = [] - self.founds = [] - self.rights = [] - - def compute(self, origin, found, right): - recall = 0 if origin == 0 else (right / origin) - precision = 0 if found == 0 else (right / found) - f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall) - return recall, precision, f1 - - def result(self): - class_info = {} - origin_counter = Counter([x[0] for x in self.origins]) - found_counter = Counter([x[0] for x in self.founds]) - right_counter = Counter([x[0] for x in self.rights]) - for type_, count in origin_counter.items(): - origin = count - found = found_counter.get(type_, 0) - right = right_counter.get(type_, 0) - # print('origin:', origin, ' found:', found, ' right:', right) - recall, precision, f1 = self.compute(origin, found, right) - class_info[type_] = {"acc": round(precision, 4), 'recall': round(recall, 4), 'f1': round(f1, 4)} - origin = len(self.origins) - found = len(self.founds) - right = len(self.rights) - recall, precision, f1 = self.compute(origin, found, right) - return {'acc': precision, 'recall': recall, 'f1': f1}, class_info - - def update(self, label_paths, pred_paths): - ''' - labels_paths: [[],[],[],....] - pred_paths: [[],[],[],.....] - - :param label_paths: - :param pred_paths: - :return: - Example: - >>> labels_paths = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] - >>> pred_paths = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] - ''' - for label_path, pre_path in zip(label_paths, pred_paths): - label_entities = get_entities(label_path, self.id2label, self.markup, self.middle_prefix) - pre_entities = get_entities(pre_path, self.id2label, self.markup, self.middle_prefix) - # print('label:', label_path, ',label_entities: ', label_entities) - # print('pred:', pre_path, ',pre_entities: ', pre_entities) - self.origins.extend(label_entities) - self.founds.extend(pre_entities) - self.rights.extend([pre_entity for pre_entity in pre_entities if pre_entity in label_entities]) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/metrics/abx_metrics/dump_abx_feats.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/metrics/abx_metrics/dump_abx_feats.py deleted file mode 100644 index 41cf558970608fa5a9241e91e59ba214b609dc73..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/metrics/abx_metrics/dump_abx_feats.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import logging -import os - -import joblib -import numpy as np - -from examples.textless_nlp.gslm.speech2unit.clustering.utils import get_audio_files -from examples.textless_nlp.gslm.speech2unit.pretrained.utils import get_features - -def get_logger(): - log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" - logging.basicConfig(format=log_format, level=logging.INFO) - logger = logging.getLogger(__name__) - return logger - -def get_parser(): - parser = argparse.ArgumentParser( - description="Quantize using K-means clustering over acoustic features." - ) - parser.add_argument( - "--feature_type", - type=str, - choices=["logmel", "hubert", "w2v2", "cpc"], - default=None, - required=True, - help="Acoustic feature type", - ) - parser.add_argument( - "--kmeans_model_path", - type=str, - required=True, - help="K-means model file path to use for inference", - ) - parser.add_argument( - "--manifest_path", - type=str, - default=None, - help="Manifest file containing the root dir and file names", - ) - parser.add_argument( - "--checkpoint_path", - type=str, - help="Pretrained model checkpoint", - ) - parser.add_argument( - "--layer", - type=int, - help="The layer of the pretrained model to extract features from", - default=-1, - ) - parser.add_argument( - "--out_dir_path", - required=True, - type=str, - help="File path of quantized output.", - ) - parser.add_argument( - "--extension", type=str, default=".flac", help="Features file path" - ) - return parser - - -def one_hot(feat, n_clusters): - return np.eye(n_clusters)[feat] - -def main(args, logger): - # Feature extraction - logger.info(f"Extracting {args.feature_type} acoustic features...") - features_batch = get_features( - feature_type=args.feature_type, - checkpoint_path=args.checkpoint_path, - layer=args.layer, - manifest_path=args.manifest_path, - sample_pct=1.0, - flatten=False, - ) - logger.info(f"Features extracted for {len(features_batch)} utterances.\n") - logger.info(f"Dimensionality of representation = {features_batch[0].shape[1]}") - - logger.info(f"Loading K-means model from {args.kmeans_model_path} ...") - kmeans_model = joblib.load(open(args.kmeans_model_path, "rb")) - kmeans_model.verbose = False - - _, fnames, _ = get_audio_files(args.manifest_path) - - os.makedirs(args.out_dir_path, exist_ok=True) - logger.info(f"Writing quantized features to {args.out_dir_path}") - for i, feats in enumerate(features_batch): - pred = kmeans_model.predict(feats) - emb = one_hot(pred, kmeans_model.n_clusters) - base_fname = os.path.basename(fnames[i]).rstrip(args.extension) - output_path = os.path.join(args.out_dir_path, f"{base_fname}.npy") - with open(output_path, "wb") as f: - np.save(f, emb) - -if __name__ == "__main__": - parser = get_parser() - args = parser.parse_args() - logger = get_logger() - logger.info(args) - main(args, logger) diff --git a/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/loader.py b/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/loader.py deleted file mode 100644 index b1304f90e8cb354c3c88628069e77c98672073d3..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/loader.py +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-# - -from indicnlp import common -from indicnlp.script import indic_scripts -from indicnlp.script import english_script -from indicnlp.transliterate import unicode_transliterate - -def load(): - """ - Initializes the Indic NLP library. Clients should call this method before using the library. - - Any module requiring initialization should have a init() method, to which a call must be made from this method - """ - - ### Order of intialization may matter - - # Common has to be loaded first to get access to resources - common.init() - - ## Initialization of Indic scripts module - indic_scripts.init() - - ## Initialization of English scripts module - english_script.init() - - ## Initialization of unicode_transliterate module - unicode_transliterate.init() - - diff --git a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/data_measurements/lengths/lengths.py b/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/data_measurements/lengths/lengths.py deleted file mode 100644 index 6ca9e52652691648eb9c2e44c1cd1a0c14df8c58..0000000000000000000000000000000000000000 --- a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/data_measurements/lengths/lengths.py +++ /dev/null @@ -1,176 +0,0 @@ -import logging -import matplotlib.image as mpimg -import matplotlib.pyplot as plt -from matplotlib.figure import Figure -from PIL import Image -import seaborn as sns -import statistics -from os.path import join as pjoin -import pandas as pd -import utils -from utils import dataset_utils as ds_utils - - -from collections import Counter -from os.path import exists, isdir -from os.path import join as pjoin - -TEXT_FIELD = "text" -TOKENIZED_FIELD = "tokenized_text" -LENGTH_FIELD = "length" - -UNIQ = "num_instance_lengths" -AVG = "average_instance_length" -STD = "standard_dev_instance_length" - -logs = utils.prepare_logging(__file__) - -def make_fig_lengths(lengths_df): - # How the hell is this working? plt transforms to sns ?! - logs.info("Creating lengths figure.") - plt.switch_backend('Agg') - fig_tok_lengths, axs = plt.subplots(figsize=(15, 6), dpi=150) - plt.xlabel("Number of tokens") - plt.title("Binned counts of text lengths, with kernel density estimate and ticks for each instance.") - sns.histplot(data=lengths_df, kde=True, ax=axs, x=LENGTH_FIELD, legend=False) - sns.rugplot(data=lengths_df, ax=axs) - return fig_tok_lengths - -class DMTHelper: - def __init__(self, dstats, load_only=False, save=True): - self.tokenized_df = dstats.tokenized_df - # Whether to only use cache - self.load_only = load_only - # Whether to try using cache first. - # Must be true when self.load_only = True; this function assures that. 
- self.use_cache = dstats.use_cache - self.cache_dir = dstats.dataset_cache_dir - self.save = save - # Lengths class object - self.lengths_obj = None - # Content shared in the DMT: - # The figure, the table, and the sufficient statistics (measurements) - self.fig_lengths = None - self.lengths_df = None - self.avg_length = None - self.std_length = None - self.uniq_counts = None - # Dict for the measurements, used in caching - self.length_stats_dict = {} - # Filenames, used in caching - self.lengths_dir = "lengths" - length_meas_json = "length_measurements.json" - lengths_fig_png = "lengths_fig.png" - lengths_df_json = "lengths_table.json" - self.length_stats_json_fid = pjoin(self.cache_dir, self.lengths_dir, length_meas_json) - self.lengths_fig_png_fid = pjoin(self.cache_dir, self.lengths_dir, lengths_fig_png) - self.lengths_df_json_fid = pjoin(self.cache_dir, self.lengths_dir, lengths_df_json) - - def run_DMT_processing(self): - """ - Gets data structures for the figure, table, and measurements. - """ - # First look to see what we can load from cache. - if self.use_cache: - logs.info("Trying to load from cache...") - # Defines self.lengths_df, self.length_stats_dict, self.fig_lengths - # This is the table, the dict of measurements, and the figure - self.load_lengths_cache() - # Sets the measurements as attributes of the DMT object - self.set_attributes() - # If we do not have measurements loaded from cache... - if not self.length_stats_dict and not self.load_only: - logs.info("Preparing length results") - # Compute length statistics. Uses the Lengths class. - self.lengths_obj = self._prepare_lengths() - # Dict of measurements - self.length_stats_dict = self.lengths_obj.length_stats_dict - # Table of text and lengths - self.lengths_df = self.lengths_obj.lengths_df - # Sets the measurements in the length_stats_dict - self.set_attributes() - # Makes the figure - self.fig_lengths = make_fig_lengths(self.lengths_df) - # Finish - if self.save: - logs.info("Saving results.") - self._write_lengths_cache() - if exists(self.lengths_fig_png_fid): - # As soon as we have a figure, we redefine it as an image. - # This is a hack to handle a UI display error (TODO: file bug) - self.fig_lengths = Image.open(self.lengths_fig_png_fid) - - def set_attributes(self): - if self.length_stats_dict: - self.avg_length = self.length_stats_dict[AVG] - self.std_length = self.length_stats_dict[STD] - self.uniq_counts = self.length_stats_dict[UNIQ] - else: - logs.info("No lengths stats found. =(") - - def load_lengths_cache(self): - # Dataframe with exists. Load it. - if exists(self.lengths_df_json_fid): - self.lengths_df = ds_utils.read_df(self.lengths_df_json_fid) - # Image exists. Load it. - if exists(self.lengths_fig_png_fid): - self.fig_lengths = Image.open(self.lengths_fig_png_fid) # mpimg.imread(self.lengths_fig_png_fid) - # Measurements exist. Load them. - if exists(self.length_stats_json_fid): - # Loads the length measurements - self.length_stats_dict = ds_utils.read_json(self.length_stats_json_fid) - - def _write_lengths_cache(self): - # Writes the data structures using the corresponding filetypes. 
- ds_utils.make_path(pjoin(self.cache_dir, self.lengths_dir)) - if self.length_stats_dict != {}: - ds_utils.write_json(self.length_stats_dict, self.length_stats_json_fid) - if isinstance(self.fig_lengths, Figure): - self.fig_lengths.savefig(self.lengths_fig_png_fid) - if isinstance(self.lengths_df, pd.DataFrame): - ds_utils.write_df(self.lengths_df, self.lengths_df_json_fid) - - def _prepare_lengths(self): - """Loads a Lengths object and computes length statistics""" - # Length object for the dataset - lengths_obj = Lengths(dataset=self.tokenized_df) - lengths_obj.prepare_lengths() - return lengths_obj - - def get_filenames(self): - lengths_fid_dict = {"statistics": self.length_stats_json_fid, - "figure png": self.lengths_fig_png_fid, - "table": self.lengths_df_json_fid} - return lengths_fid_dict - - -class Lengths: - """Generic class for text length processing. - Uses DataFrames for faster processing. - Given a dataframe with tokenized words in a column called TOKENIZED_TEXT, - and the text instances in a column called TEXT, compute statistics. - """ - - def __init__(self, dataset): - self.dset_df = dataset - # Dict of measurements - self.length_stats_dict = {} - # Measurements - self.avg_length = None - self.std_length = None - self.num_uniq_lengths = None - # Table of lengths and sentences - self.lengths_df = None - - def prepare_lengths(self): - self.lengths_df = pd.DataFrame(self.dset_df[TEXT_FIELD]) - self.lengths_df[LENGTH_FIELD] = self.dset_df[TOKENIZED_FIELD].apply(len) - lengths_array = self.lengths_df[LENGTH_FIELD] - self.avg_length = statistics.mean(lengths_array) - self.std_length = statistics.stdev(lengths_array) - self.num_uniq_lengths = len(lengths_array.unique()) - self.length_stats_dict = { - "average_instance_length": self.avg_length, - "standard_dev_instance_length": self.std_length, - "num_instance_lengths": self.num_uniq_lengths, - } diff --git a/spaces/Humbert/mmcls-retriever/README.md b/spaces/Humbert/mmcls-retriever/README.md deleted file mode 100644 index 8c244c8de16d0ca1feb40f6499997acda63623f4..0000000000000000000000000000000000000000 --- a/spaces/Humbert/mmcls-retriever/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Mmcls Retriever -emoji: 😻 -colorFrom: green -colorTo: yellow -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/utils.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/utils.py deleted file mode 100644 index 14c015b7c19aae65812e864cf1d95ef3d39de606..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/utils.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import re -from operator import attrgetter, itemgetter -import torch -import numpy as np -import torch.distributed as dist -import torch.nn as nn - -from .modules import PQConv2d, PQEmbedding, PQLinear -from .pq import PQ - - -def quantize_model_( - model, - size_tracker, - layers_to_quantize, - block_sizes_config, - n_centroids_config, - step=0, - n_iter=15, - eps=1e-6, - max_tentatives=100, - remove_weights=False, - verbose=True, - state_dict=None, -): - """ - Quantize a model in-place by stages. 
All the targeted - layers are replaced by their quantized counterpart, - and the model is ready for the finetuning of the - centroids in a standard training loop (no modifications - required). Note that we do not quantize biases. - - Args: - - model: a nn.Module - - size_tracker: useful for tracking quatization statistics - - layers_to_quantize: a list containing regexps for - filtering the layers to quantize at each stage according - to their name (as in model.named_parameters()) - - block_sizes_config: dict like - { - 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), - 'Linear': ('in_features', {'*': 8}) - } - For instance, all conv2d layers with kernel size 3x3 have - a block size of 9 and all Linear layers are quantized with - a block size of 8, irrespective of their size. - - n_centroids_config: dict like - { - 'Conv2d': ('kernel_size', {'*': 256}), - 'Linear': ('in_features', {'*': 256}) - } - For instance, all conv2d layers are quantized with 256 centroids - - step: the layers to quantize inplace corresponding - to layers_to_quantize[step] - """ - - quantized_layers = get_layers(model, layers_to_quantize[step], remove_weights=remove_weights) - - for layer in quantized_layers: - - # book-keeping - is_master_process = (not dist.is_initialized()) or ( - dist.is_initialized() and dist.get_rank() == 0 - ) - verbose = verbose and is_master_process - - # get block size and centroids - module = attrgetter(layer)(model) - block_size = get_param(module, layer, block_sizes_config) - n_centroids = get_param(module, layer, n_centroids_config) - if verbose: - logging.info( - f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids" - ) - - # quantize layer - weight = module.weight.data.clone() - is_bias = "bias" in [x[0] for x in module.named_parameters()] - bias = module.bias.data.clone() if is_bias else None - quantizer = PQ( - weight, - block_size, - n_centroids=n_centroids, - n_iter=n_iter, - eps=eps, - max_tentatives=max_tentatives, - verbose=verbose, - ) - - # quantization performed on all GPUs with same seed - quantizer.encode() - centroids = quantizer.centroids.contiguous() - assignments = quantizer.assignments.contiguous() - - # If n_iter = 0 and state_dict is provided, then - # we initialize random assignments and centroids to - # random values of the appropriate dimensions - # because the quantized model parameters will - # overwritten by the state_dict later on. - if n_iter == 0 and state_dict: - # Initialize random centroids of the correct size - centroids = torch.rand(centroids.size()) - centroids.cuda() - # Get counts and assignment keys from layer in loaded checkpoint. - counts_key = layer+"."+"counts" - assignment_key = layer+"."+"assignments" - # Get number of different bins to include. - counts = list(state_dict[counts_key].shape)[0] - print(layer) - print(state_dict[counts_key]) - print(counts) - # Initialize random assignments of the correct size - # with an appropriate number of bins. 
- num_assignments = list(state_dict[assignment_key].shape)[0] - num_extra = num_assignments - counts - print(num_assignments) - print(num_extra) - assignments_bins = torch.arange(counts) - assignments_rand = torch.randint(0, counts-1, (num_extra, )) - assignments = torch.cat((assignments_bins, assignments_rand), 0) - # assignments = assignments.type(torch.IntTensor) - assignments.cuda() - print("assignments") - print(assignments) - - # broadcast results to make sure weights are up-to-date - if dist.is_initialized(): - dist.broadcast(centroids, 0) - dist.broadcast(assignments, 0) - - # instantiate the quantized counterpart - if isinstance(module, nn.Linear): - out_features, in_features = map( - lambda k: module.__dict__[k], ["out_features", "in_features"] - ) - quantized_module = PQLinear( - centroids, assignments, bias, in_features, out_features - ) - elif isinstance(module, nn.Embedding): - num_embeddings, embedding_dim = map( - lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"] - ) - quantized_module = PQEmbedding( - centroids, assignments, num_embeddings, embedding_dim - ) - elif isinstance(module, nn.Conv2d): - out_channels, in_channels, kernel_size = map( - lambda k: module.__dict__[k], - ["out_channels", "in_channels", "kernel_size"], - ) - stride, padding, dilation, groups, padding_mode = map( - lambda k: module.__dict__[k], - ["stride", "padding", "dilation", "groups", "padding_mode"], - ) - - quantized_module = PQConv2d( - centroids, - assignments, - bias, - in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - padding_mode=padding_mode, - ) - else: - raise ValueError(f"Module {module} not yet supported for quantization") - - # replace layer by its quantized counterpart - attrsetter(layer)(model, quantized_module) - - # update statistics - size_tracker.update(weight, block_size, n_centroids) - - # return name of quantized layers - return quantized_layers - - -def get_layers(model, filter_regexp, remove_weights=False): - """ - Filters out the layers according to a regexp. Note that - we omit biases. - - Args: - - model: a nn.Module - - filter_regexp: a regexp to filter the layers to keep - according to their name in model.named_parameters(). - For instance, the regexp: - - down_layers\\.[123456]\\.(conv[12]|identity\\.conv)) - - is keeping blocks down_layers from 1 to 6, and inside - each block is keeping conv1, conv2 and identity.conv. - - Remarks: - - We add (module\\.)? at the beginning of the regexp to - account for the possible use of nn.parallel.DataParallel - """ - - # get all parameter names - all_layers = map(itemgetter(0), model.named_parameters()) - - # remove biases - all_layers = filter(lambda x: "bias" not in x, all_layers) - - # remove .weight in all other names (or .weight_orig is spectral norm) - all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers) - # remove weights indicates whether the weights extension should be removed, in addition to - # weight_orig and weight extension on names - if remove_weights: - all_layers = map(lambda x: x.replace(".weights", ""), all_layers) - all_layers = map(lambda x: x.replace(".weight", ""), all_layers) - - # return filtered layers - filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")" - r = re.compile(filter_regexp) - - return list(filter(r.match, all_layers)) - - -def get_param(module, layer_name, param_config): - """ - Given a quantization configuration, get the right parameter - for the module to be quantized. 
- - Args: - - module: a nn.Module - - layer_name: the name of the layer - - param_config: a dict like - { - 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), - 'Linear': ('in_features', {'*': 8}) - } - For instance, all conv2d layers with kernel size 3x3 have - a block size of 9 and all Linear layers are quantized with - a block size of 8, irrespective of their size. - - Remarks: - - if 'fuzzy_name' is passed as a parameter, layers whose layer_name - include 'fuzzy_name' will be assigned the given parameter. - In the following example, conv.expand layers will have a block - size of 9 while conv.reduce will have a block size of 4 and all - other layers will have a block size of 2. - { - 'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}), - 'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4}) - } - - """ - - layer_type = module.__class__.__name__ - - if layer_type not in param_config: - raise KeyError(f"Layer type {layer_type} not in config for layer {module}") - - feature, params = param_config[module.__class__.__name__] - - if feature != "fuzzy_name": - feature_value = str(getattr(module, feature)) - if feature_value not in params: - if "*" in params: - feature_value = "*" - else: - raise KeyError( - f"{feature}={feature_value} not in config for layer {module}" - ) - else: - feature_values = [name for name in params if name in layer_name] - if len(feature_values) == 0: - if "*" in params: - feature_value = "*" - else: - raise KeyError(f"name={layer_name} not in config for {module}") - else: - feature_value = feature_values[0] - - return params[feature_value] - - -class SizeTracker(object): - """ - Class to keep track of the compressed network size with iPQ. - - Args: - - model: a nn.Module - - Remarks: - - The compressed size is the sum of three components - for each layer in the network: - (1) Storing the centroids given by iPQ in fp16 - (2) Storing the assignments of the blocks in int8 - (3) Storing all non-compressed elements such as biases - - This cost in only valid if we use 256 centroids (then - indexing can indeed by done with int8). - """ - - def __init__(self, model): - self.model = model - self.size_non_compressed_model = self.compute_size() - self.size_non_quantized = self.size_non_compressed_model - self.size_index = 0 - self.size_centroids = 0 - self.n_quantized_layers = 0 - - def compute_size(self): - """ - Computes the size of the model (in MB). - """ - - res = 0 - for _, p in self.model.named_parameters(): - res += p.numel() - return res * 4 / 1024 / 1024 - - def update(self, W, block_size, n_centroids): - """ - Updates the running statistics when quantizing a new layer. - """ - - # bits per weights - bits_per_weight = np.log2(n_centroids) / block_size - self.n_quantized_layers += 1 - - # size of indexing the subvectors of size block_size (in MB) - size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024 - self.size_index += size_index_layer - - # size of the centroids stored in float16 (in MB) - size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024 - self.size_centroids += size_centroids_layer - - # size of non-compressed layers, e.g. 
LayerNorms or biases (in MB) - size_uncompressed_layer = W.numel() * 4 / 1024 / 1024 - self.size_non_quantized -= size_uncompressed_layer - - def __repr__(self): - size_compressed = ( - self.size_index + self.size_centroids + self.size_non_quantized - ) - compression_ratio = self.size_non_compressed_model / size_compressed # NOQA - return ( - f"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. " - f"After quantizing {self.n_quantized_layers} layers, size " - f"(indexing + centroids + other): {self.size_index:.2f} MB + " - f"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = " - f"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x" - ) - - -def attrsetter(*items): - def resolve_attr(obj, attr): - attrs = attr.split(".") - head = attrs[:-1] - tail = attrs[-1] - - for name in head: - obj = getattr(obj, name) - return obj, tail - - def g(obj, val): - for attr in items: - resolved_obj, resolved_attr = resolve_attr(obj, attr) - setattr(resolved_obj, resolved_attr, val) - - return g diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/models/__init__.py b/spaces/Iceclear/StableSR/StableSR/basicsr/models/__init__.py deleted file mode 100644 index 85796deae014c20a9aa600133468d04900c4fb89..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/models/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -import importlib -from copy import deepcopy -from os import path as osp - -from basicsr.utils import get_root_logger, scandir -from basicsr.utils.registry import MODEL_REGISTRY - -__all__ = ['build_model'] - -# automatically scan and import model modules for registry -# scan all the files under the 'models' folder and collect files ending with '_model.py' -model_folder = osp.dirname(osp.abspath(__file__)) -model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')] -# import all the model modules -_model_modules = [importlib.import_module(f'basicsr.models.{file_name}') for file_name in model_filenames] - - -def build_model(opt): - """Build model from options. - - Args: - opt (dict): Configuration. It must contain: - model_type (str): Model type. 
- """ - opt = deepcopy(opt) - model = MODEL_REGISTRY.get(opt['model_type'])(opt) - logger = get_root_logger() - logger.info(f'Model [{model.__class__.__name__}] is created.') - return model diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/evaluation/losses/fid/inception.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/evaluation/losses/fid/inception.py deleted file mode 100644 index e9bd0863b457aaa40c770eaa4acbb142b18fc18b..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/evaluation/losses/fid/inception.py +++ /dev/null @@ -1,323 +0,0 @@ -import logging - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchvision import models - -try: - from torchvision.models.utils import load_state_dict_from_url -except ImportError: - from torch.utils.model_zoo import load_url as load_state_dict_from_url - -# Inception weights ported to Pytorch from -# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz -FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' - - -LOGGER = logging.getLogger(__name__) - - -class InceptionV3(nn.Module): - """Pretrained InceptionV3 network returning feature maps""" - - # Index of default block of inception to return, - # corresponds to output of final average pooling - DEFAULT_BLOCK_INDEX = 3 - - # Maps feature dimensionality to their output blocks indices - BLOCK_INDEX_BY_DIM = { - 64: 0, # First max pooling features - 192: 1, # Second max pooling featurs - 768: 2, # Pre-aux classifier features - 2048: 3 # Final average pooling features - } - - def __init__(self, - output_blocks=[DEFAULT_BLOCK_INDEX], - resize_input=True, - normalize_input=True, - requires_grad=False, - use_fid_inception=True): - """Build pretrained InceptionV3 - - Parameters - ---------- - output_blocks : list of int - Indices of blocks to return features of. Possible values are: - - 0: corresponds to output of first max pooling - - 1: corresponds to output of second max pooling - - 2: corresponds to output which is fed to aux classifier - - 3: corresponds to output of final average pooling - resize_input : bool - If true, bilinearly resizes input to width and height 299 before - feeding input to model. As the network without fully connected - layers is fully convolutional, it should be able to handle inputs - of arbitrary size, so resizing might not be strictly needed - normalize_input : bool - If true, scales the input from range (0, 1) to the range the - pretrained Inception network expects, namely (-1, 1) - requires_grad : bool - If true, parameters of the model require gradients. Possibly useful - for finetuning the network - use_fid_inception : bool - If true, uses the pretrained Inception model used in Tensorflow's - FID implementation. If false, uses the pretrained Inception model - available in torchvision. The FID Inception model has different - weights and a slightly different structure from torchvision's - Inception model. If you want to compute FID scores, you are - strongly advised to set this parameter to true to get comparable - results. 
- """ - super(InceptionV3, self).__init__() - - self.resize_input = resize_input - self.normalize_input = normalize_input - self.output_blocks = sorted(output_blocks) - self.last_needed_block = max(output_blocks) - - assert self.last_needed_block <= 3, \ - 'Last possible output block index is 3' - - self.blocks = nn.ModuleList() - - if use_fid_inception: - inception = fid_inception_v3() - else: - inception = models.inception_v3(pretrained=True) - - # Block 0: input to maxpool1 - block0 = [ - inception.Conv2d_1a_3x3, - inception.Conv2d_2a_3x3, - inception.Conv2d_2b_3x3, - nn.MaxPool2d(kernel_size=3, stride=2) - ] - self.blocks.append(nn.Sequential(*block0)) - - # Block 1: maxpool1 to maxpool2 - if self.last_needed_block >= 1: - block1 = [ - inception.Conv2d_3b_1x1, - inception.Conv2d_4a_3x3, - nn.MaxPool2d(kernel_size=3, stride=2) - ] - self.blocks.append(nn.Sequential(*block1)) - - # Block 2: maxpool2 to aux classifier - if self.last_needed_block >= 2: - block2 = [ - inception.Mixed_5b, - inception.Mixed_5c, - inception.Mixed_5d, - inception.Mixed_6a, - inception.Mixed_6b, - inception.Mixed_6c, - inception.Mixed_6d, - inception.Mixed_6e, - ] - self.blocks.append(nn.Sequential(*block2)) - - # Block 3: aux classifier to final avgpool - if self.last_needed_block >= 3: - block3 = [ - inception.Mixed_7a, - inception.Mixed_7b, - inception.Mixed_7c, - nn.AdaptiveAvgPool2d(output_size=(1, 1)) - ] - self.blocks.append(nn.Sequential(*block3)) - - for param in self.parameters(): - param.requires_grad = requires_grad - - def forward(self, inp): - """Get Inception feature maps - - Parameters - ---------- - inp : torch.autograd.Variable - Input tensor of shape Bx3xHxW. Values are expected to be in - range (0, 1) - - Returns - ------- - List of torch.autograd.Variable, corresponding to the selected output - block, sorted ascending by index - """ - outp = [] - x = inp - - if self.resize_input: - x = F.interpolate(x, - size=(299, 299), - mode='bilinear', - align_corners=False) - - if self.normalize_input: - x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1) - - for idx, block in enumerate(self.blocks): - x = block(x) - if idx in self.output_blocks: - outp.append(x) - - if idx == self.last_needed_block: - break - - return outp - - -def fid_inception_v3(): - """Build pretrained Inception model for FID computation - - The Inception model for FID computation uses a different set of weights - and has a slightly different structure than torchvision's Inception. - - This method first constructs torchvision's Inception and then patches the - necessary parts that are different in the FID Inception model. 
- """ - LOGGER.info('fid_inception_v3 called') - inception = models.inception_v3(num_classes=1008, - aux_logits=False, - pretrained=False) - LOGGER.info('models.inception_v3 done') - inception.Mixed_5b = FIDInceptionA(192, pool_features=32) - inception.Mixed_5c = FIDInceptionA(256, pool_features=64) - inception.Mixed_5d = FIDInceptionA(288, pool_features=64) - inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128) - inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160) - inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160) - inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192) - inception.Mixed_7b = FIDInceptionE_1(1280) - inception.Mixed_7c = FIDInceptionE_2(2048) - - LOGGER.info('fid_inception_v3 patching done') - - state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True) - LOGGER.info('fid_inception_v3 weights downloaded') - - inception.load_state_dict(state_dict) - LOGGER.info('fid_inception_v3 weights loaded into model') - - return inception - - -class FIDInceptionA(models.inception.InceptionA): - """InceptionA block patched for FID computation""" - def __init__(self, in_channels, pool_features): - super(FIDInceptionA, self).__init__(in_channels, pool_features) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch5x5 = self.branch5x5_1(x) - branch5x5 = self.branch5x5_2(branch5x5) - - branch3x3dbl = self.branch3x3dbl_1(x) - branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) - branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) - - # Patch: Tensorflow's average pool does not use the padded zero's in - # its average calculation - branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, - count_include_pad=False) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] - return torch.cat(outputs, 1) - - -class FIDInceptionC(models.inception.InceptionC): - """InceptionC block patched for FID computation""" - def __init__(self, in_channels, channels_7x7): - super(FIDInceptionC, self).__init__(in_channels, channels_7x7) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch7x7 = self.branch7x7_1(x) - branch7x7 = self.branch7x7_2(branch7x7) - branch7x7 = self.branch7x7_3(branch7x7) - - branch7x7dbl = self.branch7x7dbl_1(x) - branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) - branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) - branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) - branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) - - # Patch: Tensorflow's average pool does not use the padded zero's in - # its average calculation - branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, - count_include_pad=False) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] - return torch.cat(outputs, 1) - - -class FIDInceptionE_1(models.inception.InceptionE): - """First InceptionE block patched for FID computation""" - def __init__(self, in_channels): - super(FIDInceptionE_1, self).__init__(in_channels) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch3x3 = self.branch3x3_1(x) - branch3x3 = [ - self.branch3x3_2a(branch3x3), - self.branch3x3_2b(branch3x3), - ] - branch3x3 = torch.cat(branch3x3, 1) - - branch3x3dbl = self.branch3x3dbl_1(x) - branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) - branch3x3dbl = [ - self.branch3x3dbl_3a(branch3x3dbl), - self.branch3x3dbl_3b(branch3x3dbl), - ] - branch3x3dbl = torch.cat(branch3x3dbl, 1) - - # Patch: Tensorflow's average pool does not use the padded zero's in - # its 
average calculation - branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, - count_include_pad=False) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] - return torch.cat(outputs, 1) - - -class FIDInceptionE_2(models.inception.InceptionE): - """Second InceptionE block patched for FID computation""" - def __init__(self, in_channels): - super(FIDInceptionE_2, self).__init__(in_channels) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch3x3 = self.branch3x3_1(x) - branch3x3 = [ - self.branch3x3_2a(branch3x3), - self.branch3x3_2b(branch3x3), - ] - branch3x3 = torch.cat(branch3x3, 1) - - branch3x3dbl = self.branch3x3dbl_1(x) - branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) - branch3x3dbl = [ - self.branch3x3dbl_3a(branch3x3dbl), - self.branch3x3dbl_3b(branch3x3dbl), - ] - branch3x3dbl = torch.cat(branch3x3dbl, 1) - - # Patch: The FID Inception model uses max pooling instead of average - # pooling. This is likely an error in this specific Inception - # implementation, as other Inception models use average pooling here - # (which matches the description in the paper). - branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] - return torch.cat(outputs, 1) diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/config.py b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/config.py deleted file mode 100644 index 77f8bc62737cb08ae38a9345ae8dc420fb643f25..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/config.py +++ /dev/null @@ -1,280 +0,0 @@ -from collections import defaultdict -from contextlib import contextmanager -import os -import logging -import sys -import commentjson as json - -from . import shared -from . 
import presets - - -__all__ = [ - "my_api_key", - "sensitive_id", - "authflag", - "auth_list", - "dockerflag", - "retrieve_proxy", - "advance_docs", - "update_doc_config", - "usage_limit", - "multi_api_key", - "server_name", - "server_port", - "share", - "check_update", - "latex_delimiters_set", - "hide_history_when_not_logged_in", - "default_chuanhu_assistant_model", - "show_api_billing" -] - -# Add a unified config file to avoid the confusion caused by too many files (lowest priority) -# It also provides config support for custom features added later -if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) -else: - config = {} - - -def load_config_to_environ(key_list): - global config - for key in key_list: - if key in config: - os.environ[key.upper()] = os.environ.get(key.upper(), config[key]) - - -lang_config = config.get("language", "auto") -language = os.environ.get("LANGUAGE", lang_config) - -hide_history_when_not_logged_in = config.get( - "hide_history_when_not_logged_in", False) -check_update = config.get("check_update", True) -show_api_billing = config.get("show_api_billing", False) -show_api_billing = bool(os.environ.get("SHOW_API_BILLING", show_api_billing)) - -if os.path.exists("api_key.txt"): - logging.info("检测到api_key.txt文件,正在进行迁移...") - with open("api_key.txt", "r", encoding="utf-8") as f: - config["openai_api_key"] = f.read().strip() - os.rename("api_key.txt", "api_key(deprecated).txt") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4, ensure_ascii=False) - -if os.path.exists("auth.json"): - logging.info("检测到auth.json文件,正在进行迁移...") - auth_list = [] - with open("auth.json", "r", encoding='utf-8') as f: - auth = json.load(f) - for _ in auth: - if auth[_]["username"] and auth[_]["password"]: - auth_list.append((auth[_]["username"], auth[_]["password"])) - else: - logging.error("请检查auth.json文件中的用户名和密码!") - sys.exit(1) - config["users"] = auth_list - os.rename("auth.json", "auth(deprecated).json") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4, ensure_ascii=False) - -# Handle docker if we are running in Docker -dockerflag = config.get("dockerflag", False) -if os.environ.get("dockerrun") == "yes": - dockerflag = True - -# Handle the api-key and the list of allowed users -my_api_key = config.get("openai_api_key", "") -my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key) -os.environ["OPENAI_API_KEY"] = my_api_key -os.environ["OPENAI_EMBEDDING_API_KEY"] = my_api_key - -if config.get("legacy_api_usage", False): - sensitive_id = my_api_key -else: - sensitive_id = config.get("sensitive_id", "") - sensitive_id = os.environ.get("SENSITIVE_ID", sensitive_id) - -# Model configuration -if "extra_models" in config: - presets.MODELS.extend(config["extra_models"]) - logging.info(f"已添加额外的模型:{config['extra_models']}") - -google_palm_api_key = config.get("google_palm_api_key", "") -google_palm_api_key = os.environ.get( - "GOOGLE_PALM_API_KEY", google_palm_api_key) -os.environ["GOOGLE_PALM_API_KEY"] = google_palm_api_key - -xmchat_api_key = config.get("xmchat_api_key", "") -os.environ["XMCHAT_API_KEY"] = xmchat_api_key - -minimax_api_key = config.get("minimax_api_key", "") -os.environ["MINIMAX_API_KEY"] = minimax_api_key -minimax_group_id = config.get("minimax_group_id", "") -os.environ["MINIMAX_GROUP_ID"] = minimax_group_id - -midjourney_proxy_api_base = config.get("midjourney_proxy_api_base", "") -os.environ["MIDJOURNEY_PROXY_API_BASE"] = midjourney_proxy_api_base -midjourney_proxy_api_secret = config.get("midjourney_proxy_api_secret", "") -os.environ["MIDJOURNEY_PROXY_API_SECRET"] =
midjourney_proxy_api_secret -midjourney_discord_proxy_url = config.get("midjourney_discord_proxy_url", "") -os.environ["MIDJOURNEY_DISCORD_PROXY_URL"] = midjourney_discord_proxy_url -midjourney_temp_folder = config.get("midjourney_temp_folder", "") -os.environ["MIDJOURNEY_TEMP_FOLDER"] = midjourney_temp_folder - -load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url", - "azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"]) - - -usage_limit = os.environ.get("USAGE_LIMIT", config.get("usage_limit", 120)) - -# Multi-account mechanism -multi_api_key = config.get("multi_api_key", False) # whether to enable the multi-account mechanism -if multi_api_key: - api_key_list = config.get("api_key_list", []) - if len(api_key_list) == 0: - logging.error("多账号模式已开启,但api_key_list为空,请检查config.json") - sys.exit(1) - shared.state.set_api_key_queue(api_key_list) - -auth_list = config.get("users", []) # actually the list of users - authflag = len(auth_list) > 0 # whether authentication is enabled, determined by the length of auth_list - -# Handle a custom api_host; the environment variable takes priority and is applied automatically if present -api_host = os.environ.get( - "OPENAI_API_BASE", config.get("openai_api_base", None)) -if api_host is not None: - shared.state.set_api_host(api_host) - os.environ["OPENAI_API_BASE"] = f"{api_host}/v1" - logging.info(f"OpenAI API Base set to: {os.environ['OPENAI_API_BASE']}") - -default_chuanhu_assistant_model = config.get( - "default_chuanhu_assistant_model", "gpt-3.5-turbo") -for x in ["GOOGLE_CSE_ID", "GOOGLE_API_KEY", "WOLFRAM_ALPHA_APPID", "SERPAPI_API_KEY"]: - if config.get(x, None) is not None: - os.environ[x] = config[x] - - -@contextmanager -def retrieve_openai_api(api_key=None): - old_api_key = os.environ.get("OPENAI_API_KEY", "") - if api_key is None: - os.environ["OPENAI_API_KEY"] = my_api_key - yield my_api_key - else: - os.environ["OPENAI_API_KEY"] = api_key - yield api_key - os.environ["OPENAI_API_KEY"] = old_api_key - - - -# Handle proxies: -http_proxy = os.environ.get("HTTP_PROXY", "") -https_proxy = os.environ.get("HTTPS_PROXY", "") -http_proxy = config.get("http_proxy", http_proxy) -https_proxy = config.get("https_proxy", https_proxy) - -# Reset the system variables; leave the environment variables unset when not needed, to avoid global proxy errors -os.environ["HTTP_PROXY"] = "" -os.environ["HTTPS_PROXY"] = "" - -local_embedding = config.get("local_embedding", False) # whether to use local embedding - - -@contextmanager -def retrieve_proxy(proxy=None): - """ - 1. If proxy is None, set the environment variables and return the latest proxy settings - 2. If proxy is not None, update the current proxy configuration, but do not update the environment variables - """ - global http_proxy, https_proxy - if proxy is not None: - http_proxy = proxy - https_proxy = proxy - yield http_proxy, https_proxy - else: - old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] - os.environ["HTTP_PROXY"] = http_proxy - os.environ["HTTPS_PROXY"] = https_proxy - yield http_proxy, https_proxy # return new proxy - - # return old proxy - os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var - - -# Handle latex options -user_latex_option = config.get("latex_option", "default") -if user_latex_option == "default": - latex_delimiters_set = [ - {"left": "$$", "right": "$$", "display": True}, - {"left": "$", "right": "$", "display": False}, - {"left": "\\(", "right": "\\)", "display": False}, - {"left": "\\[", "right": "\\]", "display": True}, - ] -elif user_latex_option == "strict": - latex_delimiters_set = [ - {"left": "$$", "right": "$$", "display": True}, - {"left": "\\(", "right": "\\)", "display": False}, - {"left": "\\[", "right": "\\]", "display": True}, - ] -elif user_latex_option == "all": - latex_delimiters_set = [ - {"left": "$$", "right": "$$", "display": True}, -
{"left": "$", "right": "$", "display": False}, - {"left": "\\(", "right": "\\)", "display": False}, - {"left": "\\[", "right": "\\]", "display": True}, - {"left": "\\begin{equation}", "right": "\\end{equation}", "display": True}, - {"left": "\\begin{align}", "right": "\\end{align}", "display": True}, - {"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True}, - {"left": "\\begin{gather}", "right": "\\end{gather}", "display": True}, - {"left": "\\begin{CD}", "right": "\\end{CD}", "display": True}, - ] -elif user_latex_option == "disabled": - latex_delimiters_set = [] -else: - latex_delimiters_set = [ - {"left": "$$", "right": "$$", "display": True}, - {"left": "$", "right": "$", "display": False}, - {"left": "\\(", "right": "\\)", "display": False}, - {"left": "\\[", "right": "\\]", "display": True}, - ] - -# Handle advance docs -advance_docs = defaultdict(lambda: defaultdict(dict)) -advance_docs.update(config.get("advance_docs", {})) - - -def update_doc_config(two_column_pdf): - global advance_docs - advance_docs["pdf"]["two_column"] = two_column_pdf - - logging.info(f"更新后的文件参数为:{advance_docs}") - - -# Handle gradio.launch parameters -server_name = config.get("server_name", None) -server_port = config.get("server_port", None) -if server_name is None: - if dockerflag: - server_name = "0.0.0.0" - else: - server_name = "127.0.0.1" -if server_port is None: - if dockerflag: - server_port = 7860 - -assert server_port is None or type(server_port) == int, "要求port设置为int类型" - -# Set the default model -default_model = config.get("default_model", "") -try: - presets.DEFAULT_MODEL = presets.MODELS.index(default_model) -except ValueError: - pass - -share = config.get("share", False) - -# avatar -bot_avatar = config.get("bot_avatar", "default") -user_avatar = config.get("user_avatar", "default") \ No newline at end of file diff --git a/spaces/Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud/UIMessages/warningsGeneral.py b/spaces/Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud/UIMessages/warningsGeneral.py deleted file mode 100644 index a979a620698363b206a0fb9316dc62c86dae8b8b..0000000000000000000000000000000000000000 --- a/spaces/Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud/UIMessages/warningsGeneral.py +++ /dev/null @@ -1,24 +0,0 @@ -from shiny import module, ui, render - -#################################### DATA CORRELATION #################################### -@module.ui -def correlation_warning_ui(): - return ui.div( - ui.output_text("correlation_warning_txt"), - style="color:red; font-style:italic; margin-top:20px; padding: 10px; background: #f7f7f7; border-radius: 10px;" - ) - - - - -@module.server -def warnings_general_server(input, output, session): - -#################################### DATA CORRELATION #################################### - @output - @render.text - def correlation_warning_txt(): - return "Los datos de diagnosis han tenido que ser convertidos previamente para continuar." - - - diff --git a/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/utils/parser.py b/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/utils/parser.py deleted file mode 100644 index 0a611bd0f182d16fecb4ab987d07896e4d1c7a48..0000000000000000000000000000000000000000 --- a/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/utils/parser.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import yaml -from easydict import EasyDict as edict - -class YamlParser(edict): - """ - This is a YAML parser based on EasyDict.
- """ - def __init__(self, cfg_dict=None, config_file=None): - if cfg_dict is None: - cfg_dict = {} - - if config_file is not None: - assert(os.path.isfile(config_file)) - with open(config_file, 'r') as fo: - cfg_dict.update(yaml.load(fo.read())) - - super(YamlParser, self).__init__(cfg_dict) - - - def merge_from_file(self, config_file): - with open(config_file, 'r') as fo: - #self.update(yaml.load(fo.read())) - self.update(yaml.load(fo.read(),Loader=yaml.FullLoader)) - - def merge_from_dict(self, config_dict): - self.update(config_dict) - - -def get_config(config_file=None): - return YamlParser(config_file=config_file) - - -if __name__ == "__main__": - cfg = YamlParser(config_file="../configs/yolov3.yaml") - cfg.merge_from_file("../configs/deep_sort.yaml") - - import ipdb; ipdb.set_trace() \ No newline at end of file diff --git a/spaces/Laihiujin/OneFormer/demo/colormap.py b/spaces/Laihiujin/OneFormer/demo/colormap.py deleted file mode 100644 index 3eff9a46d37a1926c48ef0ad6e3308128438140f..0000000000000000000000000000000000000000 --- a/spaces/Laihiujin/OneFormer/demo/colormap.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -""" -An awesome colormap for really neat visualizations. -Copied from Detectron, and removed gray colors. -""" - -import numpy as np -import random -random.seed(0) - -__all__ = ["colormap", "random_color", "random_colors"] - -# fmt: off -# RGB: -# _COLORS = np.array( -# [ -# 0.000, 0.447, 0.741, -# 0.850, 0.325, 0.098, -# 0.929, 0.694, 0.125, -# 0.494, 0.184, 0.556, -# 0.466, 0.674, 0.188, -# 0.301, 0.745, 0.933, -# 0.635, 0.078, 0.184, -# 0.300, 0.300, 0.300, -# 0.600, 0.600, 0.600, -# 1.000, 0.000, 0.000, -# 1.000, 0.500, 0.000, -# 0.749, 0.749, 0.000, -# 0.000, 1.000, 0.000, -# 0.000, 0.000, 1.000, -# 0.667, 0.000, 1.000, -# 0.333, 0.333, 0.000, -# 0.333, 0.667, 0.000, -# 0.333, 1.000, 0.000, -# 0.667, 0.333, 0.000, -# 0.667, 0.667, 0.000, -# 0.667, 1.000, 0.000, -# 1.000, 0.333, 0.000, -# 1.000, 0.667, 0.000, -# 1.000, 1.000, 0.000, -# 0.000, 0.333, 0.500, -# 0.000, 0.667, 0.500, -# 0.000, 1.000, 0.500, -# 0.333, 0.000, 0.500, -# 0.333, 0.333, 0.500, -# 0.333, 0.667, 0.500, -# 0.333, 1.000, 0.500, -# 0.667, 0.000, 0.500, -# 0.667, 0.333, 0.500, -# 0.667, 0.667, 0.500, -# 0.667, 1.000, 0.500, -# 1.000, 0.000, 0.500, -# 1.000, 0.333, 0.500, -# 1.000, 0.667, 0.500, -# 1.000, 1.000, 0.500, -# 0.000, 0.333, 1.000, -# 0.000, 0.667, 1.000, -# 0.000, 1.000, 1.000, -# 0.333, 0.000, 1.000, -# 0.333, 0.333, 1.000, -# 0.333, 0.667, 1.000, -# 0.333, 1.000, 1.000, -# 0.667, 0.000, 1.000, -# 0.667, 0.333, 1.000, -# 0.667, 0.667, 1.000, -# 0.667, 1.000, 1.000, -# 1.000, 0.000, 1.000, -# 1.000, 0.333, 1.000, -# 1.000, 0.667, 1.000, -# 0.333, 0.000, 0.000, -# 0.500, 0.000, 0.000, -# 0.667, 0.000, 0.000, -# 0.833, 0.000, 0.000, -# 1.000, 0.000, 0.000, -# 0.000, 0.167, 0.000, -# 0.000, 0.333, 0.000, -# 0.000, 0.500, 0.000, -# 0.000, 0.667, 0.000, -# 0.000, 0.833, 0.000, -# 0.000, 1.000, 0.000, -# 0.000, 0.000, 0.167, -# 0.000, 0.000, 0.333, -# 0.000, 0.000, 0.500, -# 0.000, 0.000, 0.667, -# 0.000, 0.000, 0.833, -# 0.000, 0.000, 1.000, -# 0.000, 0.000, 0.000, -# 0.143, 0.143, 0.143, -# 0.857, 0.857, 0.857, -# 1.000, 1.000, 1.000 -# ] -# ).astype(np.float32).reshape(-1, 3) -# fmt: on - -_COLORS = [] - - -def gen_color(): - color = tuple(np.round(np.random.choice(range(256), size=3)/255, 3)) - if color not in _COLORS and np.mean(color) != 0.0: - _COLORS.append(color) - else: - gen_color() - - -for _ in range(300): - gen_color() - - -def 
colormap(rgb=False, maximum=255): - """ - Args: - rgb (bool): whether to return RGB colors or BGR colors. - maximum (int): either 255 or 1 - Returns: - ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1] - """ - assert maximum in [255, 1], maximum - c = _COLORS * maximum - if not rgb: - c = c[:, ::-1] - return c - - -def random_color(rgb=False, maximum=255): - """ - Args: - rgb (bool): whether to return RGB colors or BGR colors. - maximum (int): either 255 or 1 - Returns: - ndarray: a vector of 3 numbers - """ - idx = np.random.randint(0, len(_COLORS)) - ret = _COLORS[idx] * maximum - if not rgb: - ret = ret[::-1] - return ret - - -def random_colors(N, rgb=False, maximum=255): - """ - Args: - N (int): number of unique colors needed - rgb (bool): whether to return RGB colors or BGR colors. - maximum (int): either 255 or 1 - Returns: - ndarray: a list of random_color - """ - indices = random.sample(range(len(_COLORS)), N) - ret = [_COLORS[i] * maximum for i in indices] - if not rgb: - ret = [x[::-1] for x in ret] - return ret - - -if __name__ == "__main__": - import cv2 - - size = 100 - H, W = 10, 10 - canvas = np.random.rand(H * size, W * size, 3).astype("float32") - for h in range(H): - for w in range(W): - idx = h * W + w - if idx >= len(_COLORS): - break - canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx] - cv2.imshow("a", canvas) - cv2.waitKey(0) \ No newline at end of file diff --git a/spaces/Lamai/LAMAIGPT/autogpt/commands/__init__.py b/spaces/Lamai/LAMAIGPT/autogpt/commands/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Lamai/LAMAIGPT/run_continuous.bat b/spaces/Lamai/LAMAIGPT/run_continuous.bat deleted file mode 100644 index 812aa01c1c5506c452665610c0e9e83a17c426f2..0000000000000000000000000000000000000000 --- a/spaces/Lamai/LAMAIGPT/run_continuous.bat +++ /dev/null @@ -1,3 +0,0 @@ -@echo off -set argument=--continuous -call run.bat %argument% diff --git a/spaces/Layer6/TR0N/app.py b/spaces/Layer6/TR0N/app.py deleted file mode 100644 index e03523710bf942d65994f3df74d2585d678bff5c..0000000000000000000000000000000000000000 --- a/spaces/Layer6/TR0N/app.py +++ /dev/null @@ -1,235 +0,0 @@ -import os -import re -import gradio as gr -import torch -import torch.nn.functional as F -from torch.optim import Adam -from torchvision.transforms import transforms as T -import clip -from tr0n.config import parse_args -from tr0n.modules.models.model_stylegan import Model -from tr0n.modules.models.loss import AugCosineSimLatent -from tr0n.modules.optimizers.sgld import SGLD -from bad_words import bad_words - -device = "cuda" if torch.cuda.is_available() else "cpu" -model_modes = { - "text": { - "checkpoint": "https://huggingface.co/Layer6/tr0n-stylegan2-clip/resolve/main/tr0n-stylegan2-clip-text.pth", - }, - "image": { - "checkpoint": "https://huggingface.co/Layer6/tr0n-stylegan2-clip/resolve/main/tr0n-stylegan2-clip-image.pth", - } -} - -os.environ['TOKENIZERS_PARALLELISM'] = "false" - - -# set config params -config = parse_args(is_demo=True) -config_vars = vars(config) -config_vars["stylegan_gen"] = "sg2-ffhq-1024" -config_vars["with_gmm"] = True -config_vars["num_mixtures"] = 10 - - -model = Model(config, device, None) -model.to(device) -model.eval() -for p in model.translator.parameters(): - p.requires_grad = False -loss = AugCosineSimLatent() - - -transforms_image = T.Compose([ - T.Resize(224, interpolation=T.InterpolationMode.BICUBIC), - T.CenterCrop(224), 
- T.ToTensor(), - T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), -]) - - -checkpoint_text = torch.hub.load_state_dict_from_url(model_modes["text"]["checkpoint"], map_location="cpu") -translator_state_dict_text = checkpoint_text['translator_state_dict'] -checkpoint_image = torch.hub.load_state_dict_from_url(model_modes["image"]["checkpoint"], map_location="cpu") -translator_state_dict_image = checkpoint_image['translator_state_dict'] - -# default -model.translator.load_state_dict(translator_state_dict_text) - - -css = """ - a { - display: inline-block; - color: black !important; - text-decoration: none !important; - } - #image-gen { - height: 256px; - width: 256px; - margin-left: auto; - margin-right: auto; - } -""" - - -def _slerp(val, low, high): - low_norm = low / torch.norm(low, dim=1, keepdim=True) - high_norm = high / torch.norm(high, dim=1, keepdim=True) - omega = torch.acos((low_norm*high_norm).sum(1)) - so = torch.sin(omega) - res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high - return res - - -def model_mode_text_select(): - model.translator.load_state_dict(translator_state_dict_text) - - -def model_mode_image_select(): - model.translator.load_state_dict(translator_state_dict_image) - - -def text_to_face_generate(text): - if text == "": - raise gr.Error("You need to provide to provide a prompt.") - - for word in bad_words: - if re.search(rf"\b{word}\b", text): - raise gr.Error("Unsafe content found. Please try again with a different prompt.") - - text_tok = clip.tokenize([text], truncate=True).to(device) - - # initialize optimization from the translator's output - with torch.no_grad(): - target_clip_latent, w_mixture_logits, w_means = model(x=text_tok, x_type='text', return_after_translator=True, no_sample=True) - pi = w_mixture_logits.unsqueeze(-1).repeat(1, 1, w_means.shape[-1]) # 1 x num_mixtures x w_dim - w = w_means # 1 x num_mixtures x w_dim - - w.requires_grad = True - pi.requires_grad = True - - optimizer_w = SGLD((w,), lr=1e-1, momentum=0.99, noise_std=0.01, device=device) - optimizer_pi = Adam((pi,), lr=5e-3) - - # optimization - for _ in range(100): - soft_pi = F.softmax(pi, dim=1) - w_prime = soft_pi * w - w_prime = w_prime.sum(dim=1) - - _, _, pred_clip_latent, _, _ = model(x=w_prime, x_type='gan_latent', times_augment_pred_image=50) - - l = loss(target_clip_latent, pred_clip_latent) - l.backward() - torch.nn.utils.clip_grad_norm_((w,), 1.) - torch.nn.utils.clip_grad_norm_((pi,), 1.) 
- optimizer_w.step() - optimizer_pi.step() - optimizer_w.zero_grad() - optimizer_pi.zero_grad() - - # generate final image - with torch.no_grad(): - soft_pi = F.softmax(pi, dim=1) - w_prime = soft_pi * w - w_prime = w_prime.sum(dim=1) - - _, _, _, _, pred_image_raw = model(x=w_prime, x_type='gan_latent') - - pred_image = ((pred_image_raw[0]+1.)/2.).cpu() - return T.ToPILImage()(pred_image) - - -def face_to_face_interpolate(image1, image2, interp_lambda=0.5): - if image1 is None or image2 is None: - raise gr.Error("You need to provide two images as input.") - - image1_pt = transforms_image(image1).to(device) - image2_pt = transforms_image(image2).to(device) - - # initialize optimization from the translator's output - with torch.no_grad(): - images_pt = torch.stack([image1_pt, image2_pt]) - target_clip_latents = model.clip.encode_image(images_pt).detach().float() - target_clip_latent = _slerp(interp_lambda, target_clip_latents[0].unsqueeze(0), target_clip_latents[1].unsqueeze(0)) - _, _, w = model(x=target_clip_latent, x_type='clip_latent', return_after_translator=True) - - w.requires_grad = True - - optimizer_w = SGLD((w,), lr=1e-1, momentum=0.99, noise_std=0.01, device=device) - - # optimization - for _ in range(100): - _, _, pred_clip_latent, _, _ = model(x=w, x_type='gan_latent', times_augment_pred_image=50) - - l = loss(target_clip_latent, pred_clip_latent) - l.backward() - torch.nn.utils.clip_grad_norm_((w,), 1.) - optimizer_w.step() - optimizer_w.zero_grad() - - # generate final image - with torch.no_grad(): - _, _, _, _, pred_image_raw = model(x=w, x_type='gan_latent') - - pred_image = ((pred_image_raw[0]+1.)/2.).cpu() - return T.ToPILImage()(pred_image) - - -examples_text = [ - "Muhammad Ali", - "Tinker Bell", - "A man with glasses, long black hair with sideburns and a goatee", - "A child with blue eyes and straight brown hair in the sunshine", - "A hairdresser", - "A young boy with glasses and an angry face", - "Denzel Washington", - "A portrait of Angela Merkel", - "President Emmanuel Macron", - "President Xi Jinping" -] - -examples_image = [ - ["./examples/example_1_1.jpg", "./examples/example_1_2.jpg"], - ["./examples/example_2_1.jpg", "./examples/example_2_2.jpg"], - ["./examples/example_3_1.jpg", "./examples/example_3_2.jpg"], - ["./examples/example_4_1.jpg", "./examples/example_4_2.jpg"], -] - - -with gr.Blocks(css=css) as demo: - gr.Markdown("

      TR0N Face Generation Demo

      ") - gr.Markdown("

      by Layer 6 AI

      ") - gr.Markdown("""


      """) - gr.Markdown("We introduce TR0N, a simple and efficient method to add any type of conditioning to pre-trained generative models. For this demo, we add two types of conditioning to a StyleGAN2 model pre-trained on images of human faces. First, we add text-conditioning to turn StyleGAN2 into a text-to-face model. Second, we add image semantic conditioning to StyleGAN2 to enable face-to-face interpolation. For more details and results on many other generative models, please refer to our paper linked above.") - - with gr.Tab("Text-to-face generation") as text_to_face_generation_demo: - text_to_face_generation_input = gr.Textbox(label="Enter your prompt", placeholder="e.g. A man with a beard and glasses", max_lines=1) - text_to_face_generation_button = gr.Button("Generate") - text_to_face_generation_output = gr.Image(label="Generated image", elem_id="image-gen") - text_to_face_generation_examples = gr.Examples(examples=examples_text, fn=text_to_face_generate, inputs=text_to_face_generation_input, outputs=text_to_face_generation_output) - - with gr.Tab("Face-to-face interpolation") as face_to_face_interpolation_demo: - gr.Markdown("We note that interpolations are not expected to recover the given images, even when the coefficient is 0 or 1.") - with gr.Row(): - face_to_face_interpolation_input1 = gr.Image(label="Image 1", type="pil") - face_to_face_interpolation_input2 = gr.Image(label="Image 2", type="pil") - face_to_face_interpolation_lambda = gr.Slider(label="Interpolation coefficient", minimum=0, maximum=1, value=0.5, step=0.01) - face_to_face_interpolation_button = gr.Button("Interpolate") - face_to_face_interpolation_output = gr.Image(label="Interpolated image", elem_id="image-gen") - face_to_face_interpolation_examples = gr.Examples(examples=examples_image, fn=face_to_face_interpolate, inputs=[face_to_face_interpolation_input1, face_to_face_interpolation_input2, face_to_face_interpolation_lambda], outputs=face_to_face_interpolation_output) - - text_to_face_generation_demo.select(fn=model_mode_text_select) - text_to_face_generation_input.submit(fn=text_to_face_generate, inputs=text_to_face_generation_input, outputs=text_to_face_generation_output) - text_to_face_generation_button.click(fn=text_to_face_generate, inputs=text_to_face_generation_input, outputs=text_to_face_generation_output) - - face_to_face_interpolation_demo.select(fn=model_mode_image_select) - face_to_face_interpolation_button.click(fn=face_to_face_interpolate, inputs=[face_to_face_interpolation_input1, face_to_face_interpolation_input2, face_to_face_interpolation_lambda], outputs=face_to_face_interpolation_output) - - -demo.queue() -demo.launch() diff --git a/spaces/Lbin123/Lbingo/src/components/external-link.tsx b/spaces/Lbin123/Lbingo/src/components/external-link.tsx deleted file mode 100644 index 011265f364d5a64a770f4c7e9c65c5ade21d623a..0000000000000000000000000000000000000000 --- a/spaces/Lbin123/Lbingo/src/components/external-link.tsx +++ /dev/null @@ -1,30 +0,0 @@ -export function ExternalLink({ - href, - children -}: { - href: string - children: React.ReactNode -}) { - return ( - - {children} - - - ) -} diff --git a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/logreturnsrolling.py b/spaces/Lianjd/stock_dashboard/backtrader/analyzers/logreturnsrolling.py deleted file mode 100644 index 7de7a1424125dd6592fe7cd28f99e5699ccb86bf..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/logreturnsrolling.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env python -# 
-*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import collections -import math - -import backtrader as bt - - -__all__ = ['LogReturnsRolling'] - - -class LogReturnsRolling(bt.TimeFrameAnalyzerBase): - '''This analyzer calculates rolling returns for a given timeframe and - compression - - Params: - - - ``timeframe`` (default: ``None``) - If ``None`` the ``timeframe`` of the 1st data in the system will be - used - - Pass ``TimeFrame.NoTimeFrame`` to consider the entire dataset with no - time constraints - - - ``compression`` (default: ``None``) - - Only used for sub-day timeframes to for example work on an hourly - timeframe by specifying "TimeFrame.Minutes" and 60 as compression - - If ``None`` then the compression of the 1st data of the system will be - used - - - ``data`` (default: ``None``) - - Reference asset to track instead of the portfolio value. - - .. note:: this data must have been added to a ``cerebro`` instance with - ``addata``, ``resampledata`` or ``replaydata`` - - - ``firstopen`` (default: ``True``) - - When tracking the returns of a ``data`` the following is done when - crossing a timeframe boundary, for example ``Years``: - - - Last ``close`` of previous year is used as the reference price to - see the return in the current year - - The problem is the 1st calculation, because the data has** no - previous** closing price. As such and when this parameter is ``True`` - the *opening* price will be used for the 1st calculation. - - This requires the data feed to have an ``open`` price (for ``close`` - the standard [0] notation will be used without reference to a field - price) - - Else the initial close will be used. - - - ``fund`` (default: ``None``) - - If ``None`` the actual mode of the broker (fundmode - True/False) will - be autodetected to decide if the returns are based on the total net - asset value or on the fund value. 
See ``set_fundmode`` in the broker - documentation - - Set it to ``True`` or ``False`` for a specific behavior - - Methods: - - - get_analysis - - Returns a dictionary with returns as values and the datetime points for - each return as keys - ''' - - params = ( - ('data', None), - ('firstopen', True), - ('fund', None), - ) - - def start(self): - super(LogReturnsRolling, self).start() - if self.p.fund is None: - self._fundmode = self.strategy.broker.fundmode - else: - self._fundmode = self.p.fund - - self._values = collections.deque([float('Nan')] * self.compression, - maxlen=self.compression) - - if self.p.data is None: - # keep the initial portfolio value if not tracing a data - if not self._fundmode: - self._lastvalue = self.strategy.broker.getvalue() - else: - self._lastvalue = self.strategy.broker.fundvalue - - def notify_fund(self, cash, value, fundvalue, shares): - if not self._fundmode: - self._value = value if self.p.data is None else self.p.data[0] - else: - self._value = fundvalue if self.p.data is None else self.p.data[0] - - def _on_dt_over(self): - # next is called in a new timeframe period - if self.p.data is None or len(self.p.data) > 1: - # Not tracking a data feed or data feed has data already - vst = self._lastvalue # update value_start to last - else: - # The 1st tick has no previous reference, use the opening price - vst = self.p.data.open[0] if self.p.firstopen else self.p.data[0] - - self._values.append(vst) # push values backwards (and out) - - def next(self): - # Calculate the return - super(LogReturnsRolling, self).next() - self.rets[self.dtkey] = math.log(self._value / self._values[0]) - self._lastvalue = self._value # keep last value diff --git a/spaces/LucasCodeBreak/MusicGen/audiocraft/quantization/base.py b/spaces/LucasCodeBreak/MusicGen/audiocraft/quantization/base.py deleted file mode 100644 index 1b16c130d266fbd021d3fc29bb9f98c33dd3c588..0000000000000000000000000000000000000000 --- a/spaces/LucasCodeBreak/MusicGen/audiocraft/quantization/base.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Base class for all quantizers. -""" - -from dataclasses import dataclass, field -import typing as tp - -import torch -from torch import nn - - -@dataclass -class QuantizedResult: - x: torch.Tensor - codes: torch.Tensor - bandwidth: torch.Tensor # bandwidth in kb/s used, per batch item. - penalty: tp.Optional[torch.Tensor] = None - metrics: dict = field(default_factory=dict) - - -class BaseQuantizer(nn.Module): - """Base class for quantizers. - """ - - def forward(self, x: torch.Tensor, frame_rate: int) -> QuantizedResult: - """ - Given input tensor x, returns first the quantized (or approximately quantized) - representation along with quantized codes, bandwidth, and any penalty term for the loss. - Finally, this returns a dict of metrics to update logging etc. - Frame rate must be passed so that the bandwidth is properly computed. - """ - raise NotImplementedError() - - def encode(self, x: torch.Tensor) -> torch.Tensor: - """Encode a given input tensor with the specified sample rate at the given bandwidth. - """ - raise NotImplementedError() - - def decode(self, codes: torch.Tensor) -> torch.Tensor: - """Decode the given codes to the quantized representation. - """ - raise NotImplementedError() - - @property - def total_codebooks(self): - """Total number of codebooks. 
- """ - raise NotImplementedError() - - @property - def num_codebooks(self): - """Number of active codebooks. - """ - raise NotImplementedError() - - def set_num_codebooks(self, n: int): - """Set the number of active codebooks. - """ - raise NotImplementedError() - - -class DummyQuantizer(BaseQuantizer): - """Fake quantizer that actually does not perform any quantization. - """ - def __init__(self): - super().__init__() - - def forward(self, x: torch.Tensor, frame_rate: int): - q = x.unsqueeze(1) - return QuantizedResult(x, q, torch.tensor(q.numel() * 32 * frame_rate / 1000 / len(x)).to(x)) - - def encode(self, x: torch.Tensor) -> torch.Tensor: - """Encode a given input tensor with the specified sample rate at the given bandwidth. - In the case of the DummyQuantizer, the codes are actually identical - to the input and resulting quantized representation as no quantization is done. - """ - return x.unsqueeze(1) - - def decode(self, codes: torch.Tensor) -> torch.Tensor: - """Decode the given codes to the quantized representation. - In the case of the DummyQuantizer, the codes are actually identical - to the input and resulting quantized representation as no quantization is done. - """ - return codes.squeeze(1) - - @property - def total_codebooks(self): - """Total number of codebooks. - """ - return 1 - - @property - def num_codebooks(self): - """Total number of codebooks. - """ - return self.total_codebooks - - def set_num_codebooks(self, n: int): - """Set the number of active codebooks. - """ - raise AttributeError("Cannot override the number of codebooks for the dummy quantizer") diff --git a/spaces/Mahiruoshi/BangDream-Bert-VITS2/text/english.py b/spaces/Mahiruoshi/BangDream-Bert-VITS2/text/english.py deleted file mode 100644 index 0f9339c9ed771dab5136978eaaab194ec3fe2395..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/BangDream-Bert-VITS2/text/english.py +++ /dev/null @@ -1,214 +0,0 @@ -import pickle -import os -import re -from g2p_en import G2p - -from text import symbols - -current_file_path = os.path.dirname(__file__) -CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep") -CACHE_PATH = os.path.join(current_file_path, "cmudict_cache.pickle") -_g2p = G2p() - -arpa = { - "AH0", - "S", - "AH1", - "EY2", - "AE2", - "EH0", - "OW2", - "UH0", - "NG", - "B", - "G", - "AY0", - "M", - "AA0", - "F", - "AO0", - "ER2", - "UH1", - "IY1", - "AH2", - "DH", - "IY0", - "EY1", - "IH0", - "K", - "N", - "W", - "IY2", - "T", - "AA1", - "ER1", - "EH2", - "OY0", - "UH2", - "UW1", - "Z", - "AW2", - "AW1", - "V", - "UW2", - "AA2", - "ER", - "AW0", - "UW0", - "R", - "OW1", - "EH1", - "ZH", - "AE0", - "IH2", - "IH", - "Y", - "JH", - "P", - "AY1", - "EY0", - "OY2", - "TH", - "HH", - "D", - "ER0", - "CH", - "AO1", - "AE1", - "AO2", - "OY1", - "AY2", - "IH1", - "OW0", - "L", - "SH", -} - - -def post_replace_ph(ph): - rep_map = { - ":": ",", - ";": ",", - ",": ",", - "。": ".", - "!": "!", - "?": "?", - "\n": ".", - "·": ",", - "、": ",", - "...": "…", - "v": "V", - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = "UNK" - return ph - - -def read_dict(): - g2p_dict = {} - start_line = 49 - with open(CMU_DICT_PATH) as f: - line = f.readline() - line_index = 1 - while line: - if line_index >= start_line: - line = line.strip() - word_split = line.split(" ") - word = word_split[0] - - syllable_split = word_split[1].split(" - ") - g2p_dict[word] = [] - for syllable in syllable_split: - phone_split = syllable.split(" ") - 
g2p_dict[word].append(phone_split) - - line_index = line_index + 1 - line = f.readline() - - return g2p_dict - - -def cache_dict(g2p_dict, file_path): - with open(file_path, "wb") as pickle_file: - pickle.dump(g2p_dict, pickle_file) - - -def get_dict(): - if os.path.exists(CACHE_PATH): - with open(CACHE_PATH, "rb") as pickle_file: - g2p_dict = pickle.load(pickle_file) - else: - g2p_dict = read_dict() - cache_dict(g2p_dict, CACHE_PATH) - - return g2p_dict - - -eng_dict = get_dict() - - -def refine_ph(phn): - tone = 0 - if re.search(r"\d$", phn): - tone = int(phn[-1]) + 1 - phn = phn[:-1] - return phn.lower(), tone - - -def refine_syllables(syllables): - tones = [] - phonemes = [] - for phn_list in syllables: - for i in range(len(phn_list)): - phn = phn_list[i] - phn, tone = refine_ph(phn) - phonemes.append(phn) - tones.append(tone) - return phonemes, tones - - -def text_normalize(text): - # todo: eng text normalize - return text - - -def g2p(text): - phones = [] - tones = [] - words = re.split(r"([,;.\-\?\!\s+])", text) - for w in words: - if w.upper() in eng_dict: - phns, tns = refine_syllables(eng_dict[w.upper()]) - phones += phns - tones += tns - else: - phone_list = list(filter(lambda p: p != " ", _g2p(w))) - for ph in phone_list: - if ph in arpa: - ph, tn = refine_ph(ph) - phones.append(ph) - tones.append(tn) - else: - phones.append(ph) - tones.append(0) - # todo: implement word2ph - word2ph = [1 for i in phones] - - phones = [post_replace_ph(i) for i in phones] - return phones, tones, word2ph - - -if __name__ == "__main__": - # print(get_dict()) - # print(eng_word_to_phoneme("hello")) - print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")) - # all_phones = set() - # for k, syllables in eng_dict.items(): - # for group in syllables: - # for ph in group: - # all_phones.add(ph) - # print(all_phones) diff --git a/spaces/Makiing/coolb-in-gtest/src/lib/bots/bing/index.ts b/spaces/Makiing/coolb-in-gtest/src/lib/bots/bing/index.ts deleted file mode 100644 index 2c4afae01a345b8415935228566cb30d695e768d..0000000000000000000000000000000000000000 --- a/spaces/Makiing/coolb-in-gtest/src/lib/bots/bing/index.ts +++ /dev/null @@ -1,421 +0,0 @@ -import { fetch, WebSocket, debug } from '@/lib/isomorphic' -import WebSocketAsPromised from 'websocket-as-promised' -import { - SendMessageParams, - BingConversationStyle, - ConversationResponse, - ChatResponseMessage, - ConversationInfo, - InvocationEventType, - ChatError, - ErrorCode, - ChatUpdateCompleteResponse, - ImageInfo, - KBlobResponse -} from './types' - -import { convertMessageToMarkdown, websocketUtils, streamAsyncIterable } from './utils' -import { WatchDog, createChunkDecoder } from '@/lib/utils' - -type Params = SendMessageParams<{ bingConversationStyle: BingConversationStyle }> - -const OPTIONS_SETS = [ - 'nlu_direct_response_filter', - 'deepleo', - 'disable_emoji_spoken_text', - 'responsible_ai_policy_235', - 'enablemm', - 'iycapbing', - 'iyxapbing', - 'objopinion', - 'rweasgv2', - 'dagslnv1', - 'dv3sugg', - 'autosave', - 'iyoloxap', - 'iyoloneutral', - 'clgalileo', - 'gencontentv3', -] - -export class BingWebBot { - protected conversationContext?: ConversationInfo - protected cookie: string - protected ua: string - protected endpoint = '' - private lastText = '' - private asyncTasks: Array> = [] - - constructor(opts: { - cookie: string - ua: string - bingConversationStyle?: BingConversationStyle - conversationContext?: ConversationInfo - }) { - const { cookie, ua, conversationContext } = opts - this.cookie = 
cookie?.includes(';') ? cookie : `_EDGE_V=1; _U=${cookie}` - this.ua = ua - this.conversationContext = conversationContext - } - - static buildChatRequest(conversation: ConversationInfo) { - const optionsSets = OPTIONS_SETS - if (conversation.conversationStyle === BingConversationStyle.Precise) { - optionsSets.push('h3precise') - } else if (conversation.conversationStyle === BingConversationStyle.Creative) { - optionsSets.push('h3imaginative') - } - return { - arguments: [ - { - source: 'cib', - optionsSets, - allowedMessageTypes: [ - 'Chat', - 'InternalSearchQuery', - 'Disengaged', - 'InternalLoaderMessage', - 'SemanticSerp', - 'GenerateContentQuery', - 'SearchQuery', - ], - sliceIds: [ - 'winmuid1tf', - 'anssupfor_c', - 'imgchatgptv2', - 'tts2cf', - 'contansperf', - 'mlchatpc8500w', - 'mlchatpc2', - 'ctrlworkpay', - 'winshortmsgtf', - 'cibctrl', - 'sydtransctrl', - 'sydconfigoptc', - '0705trt4', - '517opinion', - '628ajcopus0', - '330uaugs0', - '529rwea', - '0626snptrcs0', - '424dagslnv1', - ], - isStartOfSession: conversation.invocationId === 0, - message: { - author: 'user', - inputMethod: 'Keyboard', - text: conversation.prompt, - imageUrl: conversation.imageUrl, - messageType: 'Chat', - }, - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - participant: { id: conversation.clientId }, - }, - ], - invocationId: conversation.invocationId.toString(), - target: 'chat', - type: InvocationEventType.StreamInvocation, - } - } - - async createConversation(): Promise { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - - let resp: ConversationResponse | undefined - try { - const response = await fetch(this.endpoint + '/api/create', { method: 'POST', headers, redirect: 'error', mode: 'cors', credentials: 'include' }) - if (response.status === 404) { - throw new ChatError('Not Found', ErrorCode.NOTFOUND_ERROR) - } - resp = await response.json() as ConversationResponse - } catch (err) { - console.error('create conversation error', err) - } - - if (!resp?.result) { - throw new ChatError('Invalid response', ErrorCode.UNKOWN_ERROR) - } - - const { value, message } = resp.result || {} - if (value !== 'Success') { - const errorMsg = `${value}: ${message}` - if (value === 'UnauthorizedRequest') { - throw new ChatError(errorMsg, ErrorCode.BING_UNAUTHORIZED) - } - if (value === 'Forbidden') { - throw new ChatError(errorMsg, ErrorCode.BING_FORBIDDEN) - } - throw new ChatError(errorMsg, ErrorCode.UNKOWN_ERROR) - } - return resp - } - - private async createContext(conversationStyle: BingConversationStyle) { - if (!this.conversationContext) { - const conversation = await this.createConversation() - this.conversationContext = { - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - clientId: conversation.clientId, - invocationId: 0, - conversationStyle, - prompt: '', - } - } - return this.conversationContext - } - - async sendMessage(params: Params) { - try { - await this.createContext(params.options.bingConversationStyle) - Object.assign(this.conversationContext!, { prompt: params.prompt, imageUrl: params.imageUrl }) - return this.sydneyProxy(params) - } catch (error) { - params.onEvent({ - type: 'ERROR', - error: error instanceof ChatError ? 
error : new ChatError('Catch Error', ErrorCode.UNKOWN_ERROR), - }) - } - } - - private async sydneyProxy(params: Params) { - const abortController = new AbortController() - const response = await fetch(this.endpoint + '/api/sydney', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - signal: abortController.signal, - body: JSON.stringify(this.conversationContext!) - }) - if (response.status !== 200) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Unknown error', - ErrorCode.UNKOWN_ERROR, - ), - }) - } - params.signal?.addEventListener('abort', () => { - abortController.abort() - }) - - const textDecoder = createChunkDecoder() - for await (const chunk of streamAsyncIterable(response.body!)) { - this.parseEvents(params, websocketUtils.unpackMessage(textDecoder(chunk))) - } - } - - async sendWs() { - const wsConfig: ConstructorParameters[1] = { - packMessage: websocketUtils.packMessage, - unpackMessage: websocketUtils.unpackMessage, - createWebSocket: (url) => new WebSocket(url, { - headers: { - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'User-Agent': this.ua, - pragma: 'no-cache', - cookie: this.cookie, - } - }) - } - const wsp = new WebSocketAsPromised('wss://sydney.bing.com/sydney/ChatHub', wsConfig) - - wsp.open().then(() => { - wsp.sendPacked({ protocol: 'json', version: 1 }) - wsp.sendPacked({ type: 6 }) - wsp.sendPacked(BingWebBot.buildChatRequest(this.conversationContext!)) - }) - - return wsp - } - - private async useWs(params: Params) { - const wsp = await this.sendWs() - const watchDog = new WatchDog() - wsp.onUnpackedMessage.addListener((events) => { - watchDog.watch(() => { - wsp.sendPacked({ type: 6 }) - }) - this.parseEvents(params, events) - }) - - wsp.onClose.addListener(() => { - watchDog.reset() - params.onEvent({ type: 'DONE' }) - wsp.removeAllListeners() - }) - - params.signal?.addEventListener('abort', () => { - wsp.removeAllListeners() - wsp.close() - }) - } - - private async createImage(prompt: string, id: string) { - try { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - const query = new URLSearchParams({ - prompt, - id - }) - const response = await fetch(this.endpoint + '/api/image?' 
+ query.toString(), - { - method: 'POST', - headers, - mode: 'cors', - credentials: 'include' - }) - .then(res => res.text()) - if (response) { - this.lastText += '\n' + response - } - } catch (err) { - console.error('Create Image Error', err) - } - } - - private buildKnowledgeApiPayload(imageUrl: string, conversationStyle: BingConversationStyle) { - const imageInfo: ImageInfo = {} - let imageBase64: string | undefined = undefined - const knowledgeRequest = { - imageInfo, - knowledgeRequest: { - invokedSkills: [ - 'ImageById' - ], - subscriptionId: 'Bing.Chat.Multimodal', - invokedSkillsRequestData: { - enableFaceBlur: true - }, - convoData: { - convoid: this.conversationContext?.conversationId, - convotone: conversationStyle, - } - }, - } - - if (imageUrl.startsWith('data:image/')) { - imageBase64 = imageUrl.replace('data:image/', ''); - const partIndex = imageBase64.indexOf(',') - if (partIndex) { - imageBase64 = imageBase64.substring(partIndex + 1) - } - } else { - imageInfo.url = imageUrl - } - return { knowledgeRequest, imageBase64 } - } - - async uploadImage(imageUrl: string, conversationStyle: BingConversationStyle = BingConversationStyle.Creative): Promise { - if (!imageUrl) { - return - } - await this.createContext(conversationStyle) - const payload = this.buildKnowledgeApiPayload(imageUrl, conversationStyle) - - const response = await fetch(this.endpoint + '/api/kblob', - { - headers: { - 'Content-Type': 'application/json', - }, - method: 'POST', - mode: 'cors', - credentials: 'include', - body: JSON.stringify(payload), - }) - .then(res => res.json()) - .catch(e => { - console.log('Error', e) - }) - return response - } - - private async generateContent(message: ChatResponseMessage) { - if (message.contentType === 'IMAGE') { - this.asyncTasks.push(this.createImage(message.text, message.messageId)) - } - } - - private async parseEvents(params: Params, events: any) { - const conversation = this.conversationContext! - - events?.forEach(async (event: ChatUpdateCompleteResponse) => { - debug('bing event', event) - if (event.type === 3) { - await Promise.all(this.asyncTasks) - this.asyncTasks = [] - params.onEvent({ type: 'UPDATE_ANSWER', data: { text: this.lastText } }) - params.onEvent({ type: 'DONE' }) - conversation.invocationId = parseInt(event.invocationId, 10) + 1 - } else if (event.type === 1) { - const messages = event.arguments[0].messages - if (messages) { - const text = convertMessageToMarkdown(messages[0]) - this.lastText = text - params.onEvent({ type: 'UPDATE_ANSWER', data: { text, spokenText: messages[0].text, throttling: event.arguments[0].throttling } }) - } - } else if (event.type === 2) { - const messages = event.item.messages as ChatResponseMessage[] | undefined - if (!messages) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - event.item.result.error || 'Unknown error', - event.item.result.value === 'Throttled' ? ErrorCode.THROTTLE_LIMIT - : event.item.result.value === 'CaptchaChallenge' ? (this.conversationContext?.conversationId?.includes('BingProdUnAuthenticatedUsers') ? 
ErrorCode.BING_UNAUTHORIZED : ErrorCode.BING_CAPTCHA) - : ErrorCode.UNKOWN_ERROR - ), - }) - return - } - const limited = messages.some((message) => - message.contentOrigin === 'TurnLimiter' - || message.messageType === 'Disengaged' - ) - if (limited) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Sorry, you have reached chat limit in this conversation.', - ErrorCode.CONVERSATION_LIMIT, - ), - }) - return - } - - const lastMessage = event.item.messages.at(-1) as ChatResponseMessage - const specialMessage = event.item.messages.find(message => message.author === 'bot' && message.contentType === 'IMAGE') - if (specialMessage) { - this.generateContent(specialMessage) - } - - if (lastMessage) { - const text = convertMessageToMarkdown(lastMessage) - this.lastText = text - params.onEvent({ - type: 'UPDATE_ANSWER', - data: { text, throttling: event.item.throttling, suggestedResponses: lastMessage.suggestedResponses, sourceAttributions: lastMessage.sourceAttributions }, - }) - } - } - }) - } - - resetConversation() { - this.conversationContext = undefined - } -} diff --git a/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/styles/pace.css b/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/styles/pace.css deleted file mode 100644 index b5a50552b2980deebbec3861cf77a1f5c4222a36..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/styles/pace.css +++ /dev/null @@ -1,76 +0,0 @@ -.pace { - -webkit-pointer-events: none; - pointer-events: none; - -webkit-user-select: none; - -moz-user-select: none; - user-select: none; -} - -.pace-inactive { - display: none; -} - -.pace .pace-progress { - background: #29d; - position: fixed; - z-index: 2000; - top: 0; - right: 100%; - width: 100%; - height: 2px; -} - -.pace .pace-progress-inner { - display: block; - position: absolute; - right: 0px; - width: 100px; - height: 100%; - box-shadow: 0 0 10px #29d, 0 0 5px #29d; - opacity: 1.0; - -webkit-transform: rotate(3deg) translate(0px, -4px); - -moz-transform: rotate(3deg) translate(0px, -4px); - -ms-transform: rotate(3deg) translate(0px, -4px); - -o-transform: rotate(3deg) translate(0px, -4px); - transform: rotate(3deg) translate(0px, -4px); -} - -.pace .pace-activity { - display: block; - position: fixed; - z-index: 2000; - top: 15px; - right: 20px; - width: 34px; - height: 34px; - border: solid 2px transparent; - border-top-color: #9ea7ac; - border-left-color: #9ea7ac; - border-radius: 30px; - -webkit-animation: pace-spinner 700ms linear infinite; - -moz-animation: pace-spinner 700ms linear infinite; - -ms-animation: pace-spinner 700ms linear infinite; - -o-animation: pace-spinner 700ms linear infinite; - animation: pace-spinner 700ms linear infinite; -} - -@-webkit-keyframes pace-spinner { - 0% { -webkit-transform: rotate(0deg); transform: rotate(0deg); } - 100% { -webkit-transform: rotate(360deg); transform: rotate(360deg); } -} -@-moz-keyframes pace-spinner { - 0% { -moz-transform: rotate(0deg); transform: rotate(0deg); } - 100% { -moz-transform: rotate(360deg); transform: rotate(360deg); } -} -@-o-keyframes pace-spinner { - 0% { -o-transform: rotate(0deg); transform: rotate(0deg); } - 100% { -o-transform: rotate(360deg); transform: rotate(360deg); } -} -@-ms-keyframes pace-spinner { - 0% { -ms-transform: rotate(0deg); transform: rotate(0deg); } - 100% { -ms-transform: rotate(360deg); transform: rotate(360deg); } -} -@keyframes pace-spinner { - 0% { transform: rotate(0deg); transform: rotate(0deg); } - 100% { transform: rotate(360deg); transform: 
rotate(360deg); } -} \ No newline at end of file diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/lr_updater.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/lr_updater.py deleted file mode 100644 index 6365908ddf6070086de2ffc0afada46ed2f32256..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/lr_updater.py +++ /dev/null @@ -1,670 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numbers -from math import cos, pi - -import annotator.uniformer.mmcv as mmcv -from .hook import HOOKS, Hook - - -class LrUpdaterHook(Hook): - """LR Scheduler in MMCV. - - Args: - by_epoch (bool): LR changes epoch by epoch - warmup (string): Type of warmup used. It can be None(use no warmup), - 'constant', 'linear' or 'exp' - warmup_iters (int): The number of iterations or epochs that warmup - lasts - warmup_ratio (float): LR used at the beginning of warmup equals to - warmup_ratio * initial_lr - warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters - means the number of epochs that warmup lasts, otherwise means the - number of iteration that warmup lasts - """ - - def __init__(self, - by_epoch=True, - warmup=None, - warmup_iters=0, - warmup_ratio=0.1, - warmup_by_epoch=False): - # validate the "warmup" argument - if warmup is not None: - if warmup not in ['constant', 'linear', 'exp']: - raise ValueError( - f'"{warmup}" is not a supported type for warming up, valid' - ' types are "constant" and "linear"') - if warmup is not None: - assert warmup_iters > 0, \ - '"warmup_iters" must be a positive integer' - assert 0 < warmup_ratio <= 1.0, \ - '"warmup_ratio" must be in range (0,1]' - - self.by_epoch = by_epoch - self.warmup = warmup - self.warmup_iters = warmup_iters - self.warmup_ratio = warmup_ratio - self.warmup_by_epoch = warmup_by_epoch - - if self.warmup_by_epoch: - self.warmup_epochs = self.warmup_iters - self.warmup_iters = None - else: - self.warmup_epochs = None - - self.base_lr = [] # initial lr for all param groups - self.regular_lr = [] # expected lr if no warming up is performed - - def _set_lr(self, runner, lr_groups): - if isinstance(runner.optimizer, dict): - for k, optim in runner.optimizer.items(): - for param_group, lr in zip(optim.param_groups, lr_groups[k]): - param_group['lr'] = lr - else: - for param_group, lr in zip(runner.optimizer.param_groups, - lr_groups): - param_group['lr'] = lr - - def get_lr(self, runner, base_lr): - raise NotImplementedError - - def get_regular_lr(self, runner): - if isinstance(runner.optimizer, dict): - lr_groups = {} - for k in runner.optimizer.keys(): - _lr_group = [ - self.get_lr(runner, _base_lr) - for _base_lr in self.base_lr[k] - ] - lr_groups.update({k: _lr_group}) - - return lr_groups - else: - return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr] - - def get_warmup_lr(self, cur_iters): - - def _get_warmup_lr(cur_iters, regular_lr): - if self.warmup == 'constant': - warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr] - elif self.warmup == 'linear': - k = (1 - cur_iters / self.warmup_iters) * (1 - - self.warmup_ratio) - warmup_lr = [_lr * (1 - k) for _lr in regular_lr] - elif self.warmup == 'exp': - k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters) - warmup_lr = [_lr * k for _lr in regular_lr] - return warmup_lr - - if isinstance(self.regular_lr, dict): - lr_groups = {} - for key, regular_lr in self.regular_lr.items(): - lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr) - 
return lr_groups - else: - return _get_warmup_lr(cur_iters, self.regular_lr) - - def before_run(self, runner): - # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved, - # it will be set according to the optimizer params - if isinstance(runner.optimizer, dict): - self.base_lr = {} - for k, optim in runner.optimizer.items(): - for group in optim.param_groups: - group.setdefault('initial_lr', group['lr']) - _base_lr = [ - group['initial_lr'] for group in optim.param_groups - ] - self.base_lr.update({k: _base_lr}) - else: - for group in runner.optimizer.param_groups: - group.setdefault('initial_lr', group['lr']) - self.base_lr = [ - group['initial_lr'] for group in runner.optimizer.param_groups - ] - - def before_train_epoch(self, runner): - if self.warmup_iters is None: - epoch_len = len(runner.data_loader) - self.warmup_iters = self.warmup_epochs * epoch_len - - if not self.by_epoch: - return - - self.regular_lr = self.get_regular_lr(runner) - self._set_lr(runner, self.regular_lr) - - def before_train_iter(self, runner): - cur_iter = runner.iter - if not self.by_epoch: - self.regular_lr = self.get_regular_lr(runner) - if self.warmup is None or cur_iter >= self.warmup_iters: - self._set_lr(runner, self.regular_lr) - else: - warmup_lr = self.get_warmup_lr(cur_iter) - self._set_lr(runner, warmup_lr) - elif self.by_epoch: - if self.warmup is None or cur_iter > self.warmup_iters: - return - elif cur_iter == self.warmup_iters: - self._set_lr(runner, self.regular_lr) - else: - warmup_lr = self.get_warmup_lr(cur_iter) - self._set_lr(runner, warmup_lr) - - -@HOOKS.register_module() -class FixedLrUpdaterHook(LrUpdaterHook): - - def __init__(self, **kwargs): - super(FixedLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - return base_lr - - -@HOOKS.register_module() -class StepLrUpdaterHook(LrUpdaterHook): - """Step LR scheduler with min_lr clipping. - - Args: - step (int | list[int]): Step to decay the LR. If an int value is given, - regard it as the decay interval. If a list is given, decay LR at - these steps. - gamma (float, optional): Decay LR ratio. Default: 0.1. - min_lr (float, optional): Minimum LR value to keep. If LR after decay - is lower than `min_lr`, it will be clipped to this value. If None - is given, we don't perform lr clipping. Default: None. 
- """ - - def __init__(self, step, gamma=0.1, min_lr=None, **kwargs): - if isinstance(step, list): - assert mmcv.is_list_of(step, int) - assert all([s > 0 for s in step]) - elif isinstance(step, int): - assert step > 0 - else: - raise TypeError('"step" must be a list or integer') - self.step = step - self.gamma = gamma - self.min_lr = min_lr - super(StepLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - progress = runner.epoch if self.by_epoch else runner.iter - - # calculate exponential term - if isinstance(self.step, int): - exp = progress // self.step - else: - exp = len(self.step) - for i, s in enumerate(self.step): - if progress < s: - exp = i - break - - lr = base_lr * (self.gamma**exp) - if self.min_lr is not None: - # clip to a minimum value - lr = max(lr, self.min_lr) - return lr - - -@HOOKS.register_module() -class ExpLrUpdaterHook(LrUpdaterHook): - - def __init__(self, gamma, **kwargs): - self.gamma = gamma - super(ExpLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - progress = runner.epoch if self.by_epoch else runner.iter - return base_lr * self.gamma**progress - - -@HOOKS.register_module() -class PolyLrUpdaterHook(LrUpdaterHook): - - def __init__(self, power=1., min_lr=0., **kwargs): - self.power = power - self.min_lr = min_lr - super(PolyLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - if self.by_epoch: - progress = runner.epoch - max_progress = runner.max_epochs - else: - progress = runner.iter - max_progress = runner.max_iters - coeff = (1 - progress / max_progress)**self.power - return (base_lr - self.min_lr) * coeff + self.min_lr - - -@HOOKS.register_module() -class InvLrUpdaterHook(LrUpdaterHook): - - def __init__(self, gamma, power=1., **kwargs): - self.gamma = gamma - self.power = power - super(InvLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - progress = runner.epoch if self.by_epoch else runner.iter - return base_lr * (1 + self.gamma * progress)**(-self.power) - - -@HOOKS.register_module() -class CosineAnnealingLrUpdaterHook(LrUpdaterHook): - - def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs): - assert (min_lr is None) ^ (min_lr_ratio is None) - self.min_lr = min_lr - self.min_lr_ratio = min_lr_ratio - super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - if self.by_epoch: - progress = runner.epoch - max_progress = runner.max_epochs - else: - progress = runner.iter - max_progress = runner.max_iters - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - return annealing_cos(base_lr, target_lr, progress / max_progress) - - -@HOOKS.register_module() -class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook): - """Flat + Cosine lr schedule. - - Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501 - - Args: - start_percent (float): When to start annealing the learning rate - after the percentage of the total training steps. - The value should be in range [0, 1). - Default: 0.75 - min_lr (float, optional): The minimum lr. Default: None. - min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. - Either `min_lr` or `min_lr_ratio` should be specified. - Default: None. 
- """ - - def __init__(self, - start_percent=0.75, - min_lr=None, - min_lr_ratio=None, - **kwargs): - assert (min_lr is None) ^ (min_lr_ratio is None) - if start_percent < 0 or start_percent > 1 or not isinstance( - start_percent, float): - raise ValueError( - 'expected float between 0 and 1 start_percent, but ' - f'got {start_percent}') - self.start_percent = start_percent - self.min_lr = min_lr - self.min_lr_ratio = min_lr_ratio - super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - if self.by_epoch: - start = round(runner.max_epochs * self.start_percent) - progress = runner.epoch - start - max_progress = runner.max_epochs - start - else: - start = round(runner.max_iters * self.start_percent) - progress = runner.iter - start - max_progress = runner.max_iters - start - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - - if progress < 0: - return base_lr - else: - return annealing_cos(base_lr, target_lr, progress / max_progress) - - -@HOOKS.register_module() -class CosineRestartLrUpdaterHook(LrUpdaterHook): - """Cosine annealing with restarts learning rate scheme. - - Args: - periods (list[int]): Periods for each cosine anneling cycle. - restart_weights (list[float], optional): Restart weights at each - restart iteration. Default: [1]. - min_lr (float, optional): The minimum lr. Default: None. - min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. - Either `min_lr` or `min_lr_ratio` should be specified. - Default: None. - """ - - def __init__(self, - periods, - restart_weights=[1], - min_lr=None, - min_lr_ratio=None, - **kwargs): - assert (min_lr is None) ^ (min_lr_ratio is None) - self.periods = periods - self.min_lr = min_lr - self.min_lr_ratio = min_lr_ratio - self.restart_weights = restart_weights - assert (len(self.periods) == len(self.restart_weights) - ), 'periods and restart_weights should have the same length.' - super(CosineRestartLrUpdaterHook, self).__init__(**kwargs) - - self.cumulative_periods = [ - sum(self.periods[0:i + 1]) for i in range(0, len(self.periods)) - ] - - def get_lr(self, runner, base_lr): - if self.by_epoch: - progress = runner.epoch - else: - progress = runner.iter - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - - idx = get_position_from_periods(progress, self.cumulative_periods) - current_weight = self.restart_weights[idx] - nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1] - current_periods = self.periods[idx] - - alpha = min((progress - nearest_restart) / current_periods, 1) - return annealing_cos(base_lr, target_lr, alpha, current_weight) - - -def get_position_from_periods(iteration, cumulative_periods): - """Get the position from a period list. - - It will return the index of the right-closest number in the period list. - For example, the cumulative_periods = [100, 200, 300, 400], - if iteration == 50, return 0; - if iteration == 210, return 2; - if iteration == 300, return 3. - - Args: - iteration (int): Current iteration. - cumulative_periods (list[int]): Cumulative period list. - - Returns: - int: The position of the right-closest number in the period list. 
- """ - for i, period in enumerate(cumulative_periods): - if iteration < period: - return i - raise ValueError(f'Current iteration {iteration} exceeds ' - f'cumulative_periods {cumulative_periods}') - - -@HOOKS.register_module() -class CyclicLrUpdaterHook(LrUpdaterHook): - """Cyclic LR Scheduler. - - Implement the cyclical learning rate policy (CLR) described in - https://arxiv.org/pdf/1506.01186.pdf - - Different from the original paper, we use cosine annealing rather than - triangular policy inside a cycle. This improves the performance in the - 3D detection area. - - Args: - by_epoch (bool): Whether to update LR by epoch. - target_ratio (tuple[float]): Relative ratio of the highest LR and the - lowest LR to the initial LR. - cyclic_times (int): Number of cycles during training - step_ratio_up (float): The ratio of the increasing process of LR in - the total cycle. - anneal_strategy (str): {'cos', 'linear'} - Specifies the annealing strategy: 'cos' for cosine annealing, - 'linear' for linear annealing. Default: 'cos'. - """ - - def __init__(self, - by_epoch=False, - target_ratio=(10, 1e-4), - cyclic_times=1, - step_ratio_up=0.4, - anneal_strategy='cos', - **kwargs): - if isinstance(target_ratio, float): - target_ratio = (target_ratio, target_ratio / 1e5) - elif isinstance(target_ratio, tuple): - target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \ - if len(target_ratio) == 1 else target_ratio - else: - raise ValueError('target_ratio should be either float ' - f'or tuple, got {type(target_ratio)}') - - assert len(target_ratio) == 2, \ - '"target_ratio" must be list or tuple of two floats' - assert 0 <= step_ratio_up < 1.0, \ - '"step_ratio_up" must be in range [0,1)' - - self.target_ratio = target_ratio - self.cyclic_times = cyclic_times - self.step_ratio_up = step_ratio_up - self.lr_phases = [] # init lr_phases - # validate anneal_strategy - if anneal_strategy not in ['cos', 'linear']: - raise ValueError('anneal_strategy must be one of "cos" or ' - f'"linear", instead got {anneal_strategy}') - elif anneal_strategy == 'cos': - self.anneal_func = annealing_cos - elif anneal_strategy == 'linear': - self.anneal_func = annealing_linear - - assert not by_epoch, \ - 'currently only support "by_epoch" = False' - super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs) - - def before_run(self, runner): - super(CyclicLrUpdaterHook, self).before_run(runner) - # initiate lr_phases - # total lr_phases are separated as up and down - max_iter_per_phase = runner.max_iters // self.cyclic_times - iter_up_phase = int(self.step_ratio_up * max_iter_per_phase) - self.lr_phases.append( - [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]]) - self.lr_phases.append([ - iter_up_phase, max_iter_per_phase, max_iter_per_phase, - self.target_ratio[0], self.target_ratio[1] - ]) - - def get_lr(self, runner, base_lr): - curr_iter = runner.iter - for (start_iter, end_iter, max_iter_per_phase, start_ratio, - end_ratio) in self.lr_phases: - curr_iter %= max_iter_per_phase - if start_iter <= curr_iter < end_iter: - progress = curr_iter - start_iter - return self.anneal_func(base_lr * start_ratio, - base_lr * end_ratio, - progress / (end_iter - start_iter)) - - -@HOOKS.register_module() -class OneCycleLrUpdaterHook(LrUpdaterHook): - """One Cycle LR Scheduler. - - The 1cycle learning rate policy changes the learning rate after every - batch. 
The one cycle learning rate policy is described in - https://arxiv.org/pdf/1708.07120.pdf - - Args: - max_lr (float or list): Upper learning rate boundaries in the cycle - for each parameter group. - total_steps (int, optional): The total number of steps in the cycle. - Note that if a value is not provided here, it will be the max_iter - of runner. Default: None. - pct_start (float): The percentage of the cycle (in number of steps) - spent increasing the learning rate. - Default: 0.3 - anneal_strategy (str): {'cos', 'linear'} - Specifies the annealing strategy: 'cos' for cosine annealing, - 'linear' for linear annealing. - Default: 'cos' - div_factor (float): Determines the initial learning rate via - initial_lr = max_lr/div_factor - Default: 25 - final_div_factor (float): Determines the minimum learning rate via - min_lr = initial_lr/final_div_factor - Default: 1e4 - three_phase (bool): If three_phase is True, use a third phase of the - schedule to annihilate the learning rate according to - final_div_factor instead of modifying the second phase (the first - two phases will be symmetrical about the step indicated by - pct_start). - Default: False - """ - - def __init__(self, - max_lr, - total_steps=None, - pct_start=0.3, - anneal_strategy='cos', - div_factor=25, - final_div_factor=1e4, - three_phase=False, - **kwargs): - # validate by_epoch, currently only support by_epoch = False - if 'by_epoch' not in kwargs: - kwargs['by_epoch'] = False - else: - assert not kwargs['by_epoch'], \ - 'currently only support "by_epoch" = False' - if not isinstance(max_lr, (numbers.Number, list, dict)): - raise ValueError('the type of max_lr must be the one of list or ' - f'dict, but got {type(max_lr)}') - self._max_lr = max_lr - if total_steps is not None: - if not isinstance(total_steps, int): - raise ValueError('the type of total_steps must be int, but' - f'got {type(total_steps)}') - self.total_steps = total_steps - # validate pct_start - if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): - raise ValueError('expected float between 0 and 1 pct_start, but ' - f'got {pct_start}') - self.pct_start = pct_start - # validate anneal_strategy - if anneal_strategy not in ['cos', 'linear']: - raise ValueError('anneal_strategy must be one of "cos" or ' - f'"linear", instead got {anneal_strategy}') - elif anneal_strategy == 'cos': - self.anneal_func = annealing_cos - elif anneal_strategy == 'linear': - self.anneal_func = annealing_linear - self.div_factor = div_factor - self.final_div_factor = final_div_factor - self.three_phase = three_phase - self.lr_phases = [] # init lr_phases - super(OneCycleLrUpdaterHook, self).__init__(**kwargs) - - def before_run(self, runner): - if hasattr(self, 'total_steps'): - total_steps = self.total_steps - else: - total_steps = runner.max_iters - if total_steps < runner.max_iters: - raise ValueError( - 'The total steps must be greater than or equal to max ' - f'iterations {runner.max_iters} of runner, but total steps ' - f'is {total_steps}.') - - if isinstance(runner.optimizer, dict): - self.base_lr = {} - for k, optim in runner.optimizer.items(): - _max_lr = format_param(k, optim, self._max_lr) - self.base_lr[k] = [lr / self.div_factor for lr in _max_lr] - for group, lr in zip(optim.param_groups, self.base_lr[k]): - group.setdefault('initial_lr', lr) - else: - k = type(runner.optimizer).__name__ - _max_lr = format_param(k, runner.optimizer, self._max_lr) - self.base_lr = [lr / self.div_factor for lr in _max_lr] - for group, lr in zip(runner.optimizer.param_groups, 
self.base_lr): - group.setdefault('initial_lr', lr) - - if self.three_phase: - self.lr_phases.append( - [float(self.pct_start * total_steps) - 1, 1, self.div_factor]) - self.lr_phases.append([ - float(2 * self.pct_start * total_steps) - 2, self.div_factor, 1 - ]) - self.lr_phases.append( - [total_steps - 1, 1, 1 / self.final_div_factor]) - else: - self.lr_phases.append( - [float(self.pct_start * total_steps) - 1, 1, self.div_factor]) - self.lr_phases.append( - [total_steps - 1, self.div_factor, 1 / self.final_div_factor]) - - def get_lr(self, runner, base_lr): - curr_iter = runner.iter - start_iter = 0 - for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases): - if curr_iter <= end_iter: - pct = (curr_iter - start_iter) / (end_iter - start_iter) - lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr, - pct) - break - start_iter = end_iter - return lr - - -def annealing_cos(start, end, factor, weight=1): - """Calculate annealing cos learning rate. - - Cosine anneal from `weight * start + (1 - weight) * end` to `end` as - percentage goes from 0.0 to 1.0. - - Args: - start (float): The starting learning rate of the cosine annealing. - end (float): The ending learing rate of the cosine annealing. - factor (float): The coefficient of `pi` when calculating the current - percentage. Range from 0.0 to 1.0. - weight (float, optional): The combination factor of `start` and `end` - when calculating the actual starting learning rate. Default to 1. - """ - cos_out = cos(pi * factor) + 1 - return end + 0.5 * weight * (start - end) * cos_out - - -def annealing_linear(start, end, factor): - """Calculate annealing linear learning rate. - - Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0. - - Args: - start (float): The starting learning rate of the linear annealing. - end (float): The ending learing rate of the linear annealing. - factor (float): The coefficient of `pi` when calculating the current - percentage. Range from 0.0 to 1.0. 
- """ - return start + (end - start) * factor - - -def format_param(name, optim, param): - if isinstance(param, numbers.Number): - return [param] * len(optim.param_groups) - elif isinstance(param, (list, tuple)): # multi param groups - if len(param) != len(optim.param_groups): - raise ValueError(f'expected {len(optim.param_groups)} ' - f'values for {name}, got {len(param)}') - return param - else: # multi optimizers - if name not in param: - raise KeyError(f'{name} is not found in {param.keys()}') - return param[name] diff --git a/spaces/MetaWabbit/Auto-GPT/tests/local_cache_test.py b/spaces/MetaWabbit/Auto-GPT/tests/local_cache_test.py deleted file mode 100644 index bb10862656bb500f319ac231ff5bd5438d6fe7e2..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/tests/local_cache_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# sourcery skip: snake-case-functions -"""Tests for LocalCache class""" -import os -import sys -import unittest - -import pytest - -from autogpt.memory.local import LocalCache - - -def mock_config() -> dict: - """Mock the Config class""" - return type( - "MockConfig", - (object,), - { - "debug_mode": False, - "continuous_mode": False, - "speak_mode": False, - "memory_index": "auto-gpt", - }, - ) - - -@pytest.mark.integration_test -class TestLocalCache(unittest.TestCase): - """Tests for LocalCache class""" - - def setUp(self) -> None: - """Set up the test environment""" - self.cfg = mock_config() - self.cache = LocalCache(self.cfg) - - def test_add(self) -> None: - """Test adding a text to the cache""" - text = "Sample text" - self.cache.add(text) - self.assertIn(text, self.cache.data.texts) - - def test_clear(self) -> None: - """Test clearing the cache""" - self.cache.clear() - self.assertEqual(self.cache.data.texts, []) - - def test_get(self) -> None: - """Test getting a text from the cache""" - text = "Sample text" - self.cache.add(text) - result = self.cache.get(text) - self.assertEqual(result, [text]) - - def test_get_relevant(self) -> None: - """Test getting relevant texts from the cache""" - text1 = "Sample text 1" - text2 = "Sample text 2" - self.cache.add(text1) - self.cache.add(text2) - result = self.cache.get_relevant(text1, 1) - self.assertEqual(result, [text1]) - - def test_get_stats(self) -> None: - """Test getting the cache stats""" - text = "Sample text" - self.cache.add(text) - stats = self.cache.get_stats() - self.assertEqual(stats, (4, self.cache.data.embeddings.shape)) diff --git a/spaces/MirageML/sjc/README.md b/spaces/MirageML/sjc/README.md deleted file mode 100644 index ffee2e6795e917bc980db562e20e783e01001e96..0000000000000000000000000000000000000000 --- a/spaces/MirageML/sjc/README.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Score Jacobian Chaining -emoji: 🧊 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: creativeml-openrail-m ---- - -## Bib -``` -@article{sjc, - title={Score Jacobian Chaining: Lifting Pretrained 2D Diffusion Models for 3D Generation}, - author={Wang, Haochen and Du, Xiaodan and Li, Jiahao and Yeh, Raymond A. 
and Shakhnarovich, Greg}, - journal={arXiv preprint arXiv:2212.00774}, - year={2022}, -} -``` diff --git a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/cam_render.py b/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/cam_render.py deleted file mode 100644 index 7b766af057b9c052388aceb152b0191fa2e4ea25..0000000000000000000000000000000000000000 --- a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/cam_render.py +++ /dev/null @@ -1,48 +0,0 @@ -from .render import Render - -GLUT = None - -class CamRender(Render): - def __init__(self, width=1600, height=1200, name='Cam Renderer', - program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1, egl=False): - Render.__init__(self, width, height, name, program_files, color_size, ms_rate=ms_rate, egl=egl) - self.camera = None - - if not egl: - global GLUT - import OpenGL.GLUT as GLUT - GLUT.glutDisplayFunc(self.display) - GLUT.glutKeyboardFunc(self.keyboard) - - def set_camera(self, camera): - self.camera = camera - self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix() - - def keyboard(self, key, x, y): - # up - eps = 1 - # print(key) - if key == b'w': - self.camera.center += eps * self.camera.direction - elif key == b's': - self.camera.center -= eps * self.camera.direction - if key == b'a': - self.camera.center -= eps * self.camera.right - elif key == b'd': - self.camera.center += eps * self.camera.right - if key == b' ': - self.camera.center += eps * self.camera.up - elif key == b'x': - self.camera.center -= eps * self.camera.up - elif key == b'i': - self.camera.near += 0.1 * eps - self.camera.far += 0.1 * eps - elif key == b'o': - self.camera.near -= 0.1 * eps - self.camera.far -= 0.1 * eps - - self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix() - - def show(self): - if GLUT is not None: - GLUT.glutMainLoop() diff --git a/spaces/MoonMoonMoonMoon/text_generator/app.py b/spaces/MoonMoonMoonMoon/text_generator/app.py deleted file mode 100644 index f1d4beb0a8f3cee27903f527b6bf8daa485a75a0..0000000000000000000000000000000000000000 --- a/spaces/MoonMoonMoonMoon/text_generator/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("huggingface/gpt2").launch() \ No newline at end of file diff --git a/spaces/MuskanMjn/Segmenting_greek_coins_using_Segmental_Clustering/README.md b/spaces/MuskanMjn/Segmenting_greek_coins_using_Segmental_Clustering/README.md deleted file mode 100644 index 2752971d7b1370b2b4ee2f0b2d75554f28c374b5..0000000000000000000000000000000000000000 --- a/spaces/MuskanMjn/Segmenting_greek_coins_using_Segmental_Clustering/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Segmenting Greek Coins Using Segmental Clustering -emoji: 🏃 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/utils/utils.py b/spaces/NAACL2022/CLIP-Caption-Reward/captioning/utils/utils.py deleted file mode 100644 index 85e12a8a1fcb5be1fa6b8833381b0a7918add5c4..0000000000000000000000000000000000000000 --- a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/utils/utils.py +++ /dev/null @@ -1,138 +0,0 @@ -import re -import numpy as np -import torch -import torch.distributed as dist -import collections -import logging - -def get_area(pos): - """ - Args - pos: [B, N, 4] - (x1, x2, y1, y2) - - Return - 
area : [B, N] - """ - # [B, N] - height = pos[:, :, 3] - pos[:, :, 2] - width = pos[:, :, 1] - pos[:, :, 0] - area = height * width - return area - -def get_relative_distance(pos): - """ - Args - pos: [B, N, 4] - (x1, x2, y1, y2) - - Return - out : [B, N, N, 4] - """ - # B, N = pos.size()[:-1] - - # [B, N, N, 4] - relative_distance = pos.unsqueeze(1) - pos.unsqueeze(2) - - return relative_distance - - -class LossMeter(object): - def __init__(self, maxlen=100): - """Computes and stores the running average""" - self.vals = collections.deque([], maxlen=maxlen) - - def __len__(self): - return len(self.vals) - - def update(self, new_val): - self.vals.append(new_val) - - @property - def val(self): - return sum(self.vals) / len(self.vals) - - def __repr__(self): - return str(self.val) - - -def count_parameters(model): - return sum(p.numel() for p in model.parameters() if p.requires_grad) - - -def load_state_dict(state_dict_path, loc='cpu'): - state_dict = torch.load(state_dict_path, map_location=loc) - # Change Multi GPU to single GPU - original_keys = list(state_dict.keys()) - for key in original_keys: - if key.startswith("module."): - new_key = key[len("module."):] - state_dict[new_key] = state_dict.pop(key) - return state_dict - - -def set_global_logging_level(level=logging.ERROR, prefices=[""]): - """ - Override logging levels of different modules based on their name as a prefix. - It needs to be invoked after the modules have been loaded so that their loggers have been initialized. - - Args: - - level: desired level. e.g. logging.INFO. Optional. Default is logging.ERROR - - prefices: list of one or more str prefices to match (e.g. ["transformers", "torch"]). Optional. - Default is `[""]` to match all active loggers. - The match is a case-sensitive `module_name.startswith(prefix)` - """ - prefix_re = re.compile(fr'^(?:{ "|".join(prefices) })') - for name in logging.root.manager.loggerDict: - if re.match(prefix_re, name): - logging.getLogger(name).setLevel(level) - - -def get_iou(anchors, gt_boxes): - """ - anchors: (N, 4) torch floattensor - gt_boxes: (K, 4) torch floattensor - overlaps: (N, K) ndarray of overlap between boxes and query_boxes - """ - N = anchors.size(0) - - if gt_boxes.size() == (4,): - gt_boxes = gt_boxes.view(1, 4) - K = gt_boxes.size(0) - - gt_boxes_area = ( - (gt_boxes[:, 2] - gt_boxes[:, 0] + 1) * - (gt_boxes[:, 3] - gt_boxes[:, 1] + 1) - ).view(1, K) - - anchors_area = ( - (anchors[:, 2] - anchors[:, 0] + 1) * - (anchors[:, 3] - anchors[:, 1] + 1) - ).view(N, 1) - - boxes = anchors.view(N, 1, 4).expand(N, K, 4) - query_boxes = gt_boxes.view(1, K, 4).expand(N, K, 4) - - iw = ( - torch.min(boxes[:, :, 2], query_boxes[:, :, 2]) - - torch.max(boxes[:, :, 0], query_boxes[:, :, 0]) - + 1 - ) - iw[iw < 0] = 0 - - ih = ( - torch.min(boxes[:, :, 3], query_boxes[:, :, 3]) - - torch.max(boxes[:, :, 1], query_boxes[:, :, 1]) - + 1 - ) - ih[ih < 0] = 0 - - ua = anchors_area + gt_boxes_area - (iw * ih) - overlaps = iw * ih / ua - - return overlaps - - -def xywh_to_xyxy(boxes): - """Convert [x y w h] box format to [x1 y1 x2 y2] format.""" - return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1)) diff --git a/spaces/NMEX/rvc-hoyogame-v2/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/NMEX/rvc-hoyogame-v2/lib/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/NMEX/rvc-hoyogame-v2/lib/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null 
@@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/NiuTaipu/moe-tts-test01/commons.py b/spaces/NiuTaipu/moe-tts-test01/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/NiuTaipu/moe-tts-test01/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, 
channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. 
/ norm_type) - return total_norm diff --git a/spaces/Nixtla/transfer-learning-time-series/app.py b/spaces/Nixtla/transfer-learning-time-series/app.py deleted file mode 100644 index f42094d9b10ac25d0bd0a1ff2b3e59246cb4467c..0000000000000000000000000000000000000000 --- a/spaces/Nixtla/transfer-learning-time-series/app.py +++ /dev/null @@ -1,374 +0,0 @@ -from time import time - -import numpy as np -import pandas as pd -import plotly.express as px -import plotly.graph_objects as go -import streamlit as st -from datasetsforecast.losses import rmse, mae, smape, mse, mape -from st_aggrid import AgGrid - -from src.nf import MODELS, forecast_pretrained_model -from src.model_descriptions import model_cards - -DATASETS = { - "Electricity (Ercot COAST)": "https://raw.githubusercontent.com/Nixtla/transfer-learning-time-series/main/datasets/ercot_COAST.csv", - #"Electriciy (ERCOT, multiple markets)": "https://raw.githubusercontent.com/Nixtla/transfer-learning-time-series/main/datasets/ercot_multiple_ts.csv", - "Web Traffic (Peyton Manning)": "https://raw.githubusercontent.com/Nixtla/transfer-learning-time-series/main/datasets/peyton_manning.csv", - "Demand (AirPassengers)": "https://raw.githubusercontent.com/Nixtla/transfer-learning-time-series/main/datasets/air_passengers.csv", - "Finance (Exchange USD-EUR)": "https://raw.githubusercontent.com/Nixtla/transfer-learning-time-series/main/datasets/usdeur.csv", -} - - -@st.cache_data -def convert_df(df): - # IMPORTANT: Cache the conversion to prevent computation on every rerun - return df.to_csv(index=False).encode("utf-8") - - -def plot(df, uid, df_forecast, model): - figs = [] - figs += [ - go.Scatter( - x=df["ds"], - y=df["y"], - mode="lines", - marker=dict(color="#236796"), - legendrank=1, - name=uid, - ), - ] - if df_forecast is not None: - ds_f = df_forecast["ds"].to_list() - lo = df_forecast["forecast_lo_90"].to_list() - hi = df_forecast["forecast_hi_90"].to_list() - figs += [ - go.Scatter( - x=ds_f + ds_f[::-1], - y=hi + lo[::-1], - fill="toself", - fillcolor="#E7C4C0", - mode="lines", - line=dict(color="#E7C4C0"), - name="Prediction Intervals (90%)", - legendrank=5, - opacity=0.5, - hoverinfo="skip", - ), - go.Scatter( - x=ds_f, - y=df_forecast["forecast"], - mode="lines", - legendrank=4, - marker=dict(color="#E7C4C0"), - name=f"Forecast {uid}", - ), - ] - fig = go.Figure(figs) - fig.update_layout( - {"plot_bgcolor": "rgba(0, 0, 0, 0)", "paper_bgcolor": "rgba(0, 0, 0, 0)"} - ) - fig.update_layout( - title=f"Forecasts for {uid} using Transfer Learning (from {model})", - legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1), - margin=dict(l=20, b=20), - xaxis=dict(rangeslider=dict(visible=True)), - ) - initial_range = [df.tail(200)["ds"].iloc[0], ds_f[-1]] - fig["layout"]["xaxis"].update(range=initial_range) - return fig - - -def st_transfer_learning(): - st.set_page_config( - page_title="Time Series Visualization", - page_icon="🔮", - layout="wide", - initial_sidebar_state="expanded", - ) - - st.title( - "Transfer Learning: Revolutionizing Time Series by Nixtla" - ) - st.write( - "", unsafe_allow_html=True - ) - - intro = """ - The success of startups like Open AI and Stability highlights the potential for transfer learning (TL) techniques to have a similar impact on the field of time series forecasting. - - TL can achieve lightning-fast predictions with a fraction of the computational cost by pre-training a flexible model on a large dataset and then using it on another dataset with little to no additional training. 
- - In this live demo, you can use pre-trained models by Nixtla (trained on the M4 dataset) to predict your own datasets. You can also see how the models perform on unseen example datasets. - """ - st.write(intro) - - required_cols = ["ds", "y"] - - with st.sidebar.expander("Dataset", expanded=False): - data_selection = st.selectbox("Select example dataset", DATASETS.keys()) - data_url = DATASETS[data_selection] - url_json = st.text_input("Data (you can pass your own url here)", data_url) - st.write( - "You can also upload a CSV file like [this one](https://github.com/Nixtla/transfer-learning-time-series/blob/main/datasets/air_passengers.csv)." - ) - - uploaded_file = st.file_uploader("Upload CSV") - with st.form("Data"): - - if uploaded_file is not None: - df = pd.read_csv(uploaded_file) - cols = df.columns - timestamp_col = st.selectbox("Timestamp column", options=cols) - value_col = st.selectbox("Value column", options=cols) - else: - timestamp_col = st.text_input("Timestamp column", value="timestamp") - value_col = st.text_input("Value column", value="value") - st.write("You must press Submit each time you want to forecast.") - submitted = st.form_submit_button("Submit") - if submitted: - if uploaded_file is None: - st.write("Please provide a dataframe.") - if url_json.endswith("json"): - df = pd.read_json(url_json) - else: - df = pd.read_csv(url_json) - df = df.rename( - columns=dict(zip([timestamp_col, value_col], required_cols)) - ) - else: - # df = pd.read_csv(uploaded_file) - df = df.rename( - columns=dict(zip([timestamp_col, value_col], required_cols)) - ) - else: - if url_json.endswith("json"): - df = pd.read_json(url_json) - else: - df = pd.read_csv(url_json) - cols = df.columns - if "unique_id" in cols: - cols = cols[-2:] - df = df.rename(columns=dict(zip(cols, required_cols))) - - if "unique_id" not in df: - df.insert(0, "unique_id", "ts_0") - - df["ds"] = pd.to_datetime(df["ds"]) - df = df.sort_values(["unique_id", "ds"]) - - with st.sidebar: - st.write("Define the pretrained model you want to use to forecast your data") - model_name = st.selectbox("Select your model", tuple(MODELS.keys())) - model_file = MODELS[model_name]["model"] - st.write("Choose how many steps you want to forecast") - fh = st.number_input("Forecast horizon", value=18) - st.write( - "Choose for how many steps the pretrained model will be updated using your data (use 0 for fast computation)" - ) - max_steps = st.number_input("N-shot inference", value=0) - - # tabs - tab_fcst, tab_cv, tab_docs, tab_nixtla = st.tabs( - [ - "📈 Forecast", - "🔎 Cross Validation", - "📚 Documentation", - "🔮 Nixtlaverse", - ] - ) - - uids = df["unique_id"].unique() - fcst_cols = ["forecast_lo_90", "forecast", "forecast_hi_90"] - - with tab_fcst: - uid = uids[0]#st.selectbox("Dataset", options=uids) - col1, col2 = st.columns([2, 4]) - with col1: - tab_insample, tab_forecast = st.tabs( - ["Modify input data", "Modify forecasts"] - ) - with tab_insample: - df_grid = df.query("unique_id == @uid").drop(columns="unique_id") - grid_table = AgGrid( - df_grid, - editable=True, - theme="streamlit", - fit_columns_on_grid_load=True, - height=360, - ) - df.loc[df["unique_id"] == uid, "y"] = ( - grid_table["data"].sort_values("ds")["y"].values - ) - # forecast code - init = time() - df_forecast = forecast_pretrained_model(df, model_file, fh, max_steps) - end = time() - df_forecast = df_forecast.rename( - columns=dict(zip(["y_5", "y_50", "y_95"], fcst_cols)) - ) - with tab_forecast: - df_fcst_grid = df_forecast.query("unique_id == 
@uid").filter( - ["ds", "forecast"] - ) - grid_fcst_table = AgGrid( - df_fcst_grid, - editable=True, - theme="streamlit", - fit_columns_on_grid_load=True, - height=360, - ) - changes = ( - df_forecast.query("unique_id == @uid")["forecast"].values - - grid_fcst_table["data"].sort_values("ds")["forecast"].values - ) - for col in fcst_cols: - df_forecast.loc[df_forecast["unique_id"] == uid, col] = ( - df_forecast.loc[df_forecast["unique_id"] == uid, col] - changes - ) - with col2: - st.plotly_chart( - plot( - df.query("unique_id == @uid"), - uid, - df_forecast.query("unique_id == @uid"), - model_name, - ), - use_container_width=True, - ) - st.success(f'Done! Approximate inference time CPU: {0.7*(end-init):.2f} seconds.') - - with tab_cv: - col_uid, col_n_windows = st.columns(2) - uid = uids[0] - #with col_uid: - # uid = st.selectbox("Time series to analyse", options=uids, key="uid_cv") - with col_n_windows: - n_windows = st.number_input("Cross validation windows", value=1) - df_forecast = [] - for i_window in range(n_windows, 0, -1): - test = df.groupby("unique_id").tail(i_window * fh) - df_forecast_w = forecast_pretrained_model( - df.drop(test.index), model_file, fh, max_steps - ) - df_forecast_w = df_forecast_w.rename( - columns=dict(zip(["y_5", "y_50", "y_95"], fcst_cols)) - ) - df_forecast_w.insert(2, "window", i_window) - df_forecast.append(df_forecast_w) - df_forecast = pd.concat(df_forecast) - df_forecast["ds"] = pd.to_datetime(df_forecast["ds"]) - df_forecast = df_forecast.merge(df, how="left", on=["unique_id", "ds"]) - metrics = [mae, mape, rmse, smape] - evaluation = df_forecast.groupby(["unique_id", "window"]).apply( - lambda df: [f'{fn(df["y"].values, df["forecast"]):.2f}' for fn in metrics] - ) - evaluation = evaluation.rename("eval").reset_index() - evaluation["eval"] = evaluation["eval"].str.join(",") - evaluation[["MAE", "MAPE", "RMSE", "sMAPE"]] = evaluation["eval"].str.split( - ",", expand=True - ) - col_eval, col_plot = st.columns([2, 4]) - with col_eval: - st.write("Evaluation metrics for each cross validation window") - st.dataframe( - evaluation.query("unique_id == @uid") - .drop(columns=["unique_id", "eval"]) - .set_index("window") - ) - with col_plot: - st.plotly_chart( - plot( - df.query("unique_id == @uid"), - uid, - df_forecast.query("unique_id == @uid").drop(columns="y"), - model_name, - ), - use_container_width=True, - ) - with tab_docs: - tab_transfer, tab_desc, tab_ref = st.tabs( - [ - "🚀 Transfer Learning", - "🔎 Description of the model", - "📚 References", - ] - ) - - with tab_desc: - model_card_name = MODELS[model_name]["card"] - st.subheader("Abstract") - st.write(f"""{model_cards[model_card_name]['Abstract']}""") - st.subheader("Intended use") - st.write(f"""{model_cards[model_card_name]['Intended use']}""") - st.subheader("Secondary use") - st.write(f"""{model_cards[model_card_name]['Secondary use']}""") - st.subheader("Limitations") - st.write(f"""{model_cards[model_card_name]['Limitations']}""") - st.subheader("Training data") - st.write(f"""{model_cards[model_card_name]['Training data']}""") - st.subheader("BibTex/Citation Info") - st.code(f"""{model_cards[model_card_name]['Citation Info']}""") - - with tab_transfer: - transfer_text = """ - Transfer learning refers to the process of pre-training a flexible model on a large dataset and using it later on other data with little to no training. It is one of the most outstanding 🚀 achievements in Machine Learning 🧠 and has many practical applications. 
- - For time series forecasting, the technique allows you to get lightning-fast predictions ⚡ bypassing the tradeoff between accuracy and speed. - - [This notebook](https://colab.research.google.com/drive/1uFCO2UBpH-5l2fk3KmxfU0oupsOC6v2n?authuser=0&pli=1#cell-5=) shows how to generate a pre-trained model and store it in a checkpoint to make it available for public use to forecast new time series never seen by the model. - **You can contribute with your pre-trained models by following [this Notebook](https://github.com/Nixtla/transfer-learning-time-series/blob/main/nbs/Transfer_Learning.ipynb) and sending us an email at federico[at]nixtla.io** - - You can also take a look at list of pretrained models here. Currently we have this ones avaiable in our [API](https://docs.nixtla.io/reference/neural_transfer_neural_transfer_post) or [Demo](http://nixtla.io/transfer-learning/). You can also download the `.ckpt`: - - [Pretrained N-HiTS M4 Hourly](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nhits_m4_hourly.ckpt) - - [Pretrained N-HiTS M4 Hourly (Tiny)](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nhits_m4_hourly_tiny.ckpt) - - [Pretrained N-HiTS M4 Daily](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nhits_m4_daily.ckpt) - - [Pretrained N-HiTS M4 Monthly](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nhits_m4_monthly.ckpt) - - [Pretrained N-HiTS M4 Yearly](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nhits_m4_yearly.ckpt) - - [Pretrained N-BEATS M4 Hourly](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nbeats_m4_hourly.ckpt) - - [Pretrained N-BEATS M4 Daily](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nbeats_m4_daily.ckpt) - - [Pretrained N-BEATS M4 Weekly](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nbeats_m4_weekly.ckpt) - - [Pretrained N-BEATS M4 Monthly](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nbeats_m4_monthly.ckpt) - - [Pretrained N-BEATS M4 Yearly](https://nixtla-public.s3.amazonaws.com/transfer/pretrained_models/nbeats_m4_yearly.ckpt) - """ - st.write(transfer_text) - - with tab_ref: - ref_text = """ - If you are interested in the transfer learning literature applied to time series forecasting, take a look at these papers: - - [Meta-learning framework with applications to zero-shot time-series forecasting](https://arxiv.org/abs/2002.02887) - - [N-HiTS: Neural Hierarchical Interpolation for Time Series Forecasting](https://arxiv.org/abs/2201.12886) - """ - st.write(ref_text) - - with tab_nixtla: - nixtla_text = """ - Nixtla is a startup that is building forecasting software for Data Scientists and Devs. - - We have been developing different open source libraries for machine learning, statistical and deep learning forecasting. - - In our [GitHub repo](https://github.com/Nixtla), you can find the projects that support this APP. 
- """ - st.write(nixtla_text) - st.image( - "https://files.readme.io/168cdb2-Screen_Shot_2022-09-30_at_10.40.09.png", - width=800, - ) - - with st.sidebar: - st.download_button( - label="Download historical data as CSV", - data=convert_df(df), - file_name="history.csv", - mime="text/csv", - ) - st.download_button( - label="Download forecasts as CSV", - data=convert_df(df_forecast), - file_name="forecasts.csv", - mime="text/csv", - ) - - -if __name__ == "__main__": - st_transfer_learning() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/adadelta.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/adadelta.py deleted file mode 100644 index f1a21549770f0904a6a40a42ff7eb52811f1bfbe..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/adadelta.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch.optim - -from . import LegacyFairseqOptimizer, register_optimizer - - -@register_optimizer("adadelta") -class Adadelta(LegacyFairseqOptimizer): - def __init__(self, args, params): - super().__init__(args) - self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config) - - @staticmethod - def add_args(parser): - """Add optimizer-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', - help='coefficient used for computing a running average of squared gradients') - parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS', - help='term added to the denominator to improve numerical stability') - parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', - help='weight decay') - parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps') - # fmt: on - - @property - def optimizer_config(self): - """ - Return a kwarg dictionary that will be used to override optimizer - args stored in checkpoints. This allows us to load a checkpoint and - resume training using a different set of optimizer args, e.g., with a - different learning rate. 
- """ - return { - "lr": self.args.lr[0], - "rho": self.args.adadelta_rho, - "eps": self.args.adadelta_eps, - "weight_decay": self.args.weight_decay, - } - - @property - def supports_flat_params(self): - return True diff --git a/spaces/OFA-Sys/OFA-Image_Caption/models/ofa/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/models/ofa/__init__.py deleted file mode 100644 index 5ca74d790a95a2b14d3fbb0cf9f0a9959416d305..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/models/ofa/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .ofa import OFAModel, ofa_base_architecture, ofa_large_architecture, ofa_huge_architecture \ No newline at end of file diff --git a/spaces/OmarSamehSaid/Text-Summerization/app.py b/spaces/OmarSamehSaid/Text-Summerization/app.py deleted file mode 100644 index 95ab82b4d6ae502ff2691317b7345307565dc14d..0000000000000000000000000000000000000000 --- a/spaces/OmarSamehSaid/Text-Summerization/app.py +++ /dev/null @@ -1,12 +0,0 @@ -from transformers import pipeline -import gradio as gr - -model = pipeline("summarization") - -def predict(prompt): - summary = model(prompt)[0]["summary_text"] - return summary - -textbox = gr.Textbox(placeholder="Enter text to summarize", lines=4) -iface = gr.Interface(fn=predict, inputs=textbox, outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/README.md b/spaces/OpenMotionLab/MotionGPT/pyrender/README.md deleted file mode 100644 index ae88ed1c5e78f247e38291ed83cf4c81230bf976..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/pyrender/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# Pyrender - -[![Build Status](https://travis-ci.org/mmatl/pyrender.svg?branch=master)](https://travis-ci.org/mmatl/pyrender) -[![Documentation Status](https://readthedocs.org/projects/pyrender/badge/?version=latest)](https://pyrender.readthedocs.io/en/latest/?badge=latest) -[![Coverage Status](https://coveralls.io/repos/github/mmatl/pyrender/badge.svg?branch=master)](https://coveralls.io/github/mmatl/pyrender?branch=master) -[![PyPI version](https://badge.fury.io/py/pyrender.svg)](https://badge.fury.io/py/pyrender) -[![Downloads](https://pepy.tech/badge/pyrender)](https://pepy.tech/project/pyrender) - -Pyrender is a pure Python (2.7, 3.4, 3.5, 3.6) library for physically-based -rendering and visualization. -It is designed to meet the [glTF 2.0 specification from Khronos](https://www.khronos.org/gltf/). - -Pyrender is lightweight, easy to install, and simple to use. -It comes packaged with both an intuitive scene viewer and a headache-free -offscreen renderer with support for GPU-accelerated rendering on headless -servers, which makes it perfect for machine learning applications. - -Extensive documentation, including a quickstart guide, is provided [here](https://pyrender.readthedocs.io/en/latest/). - -For a minimal working example of GPU-accelerated offscreen rendering using EGL, -check out the [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing). - - -

-  (Images: GIF of Viewer, Damaged Helmet)

      - -## Installation -You can install pyrender directly from pip. - -```bash -pip install pyrender -``` - -## Features - -Despite being lightweight, pyrender has lots of features, including: - -* Simple interoperation with the amazing [trimesh](https://github.com/mikedh/trimesh) project, -which enables out-of-the-box support for dozens of mesh types, including OBJ, -STL, DAE, OFF, PLY, and GLB. -* An easy-to-use scene viewer with support for animation, showing face and vertex -normals, toggling lighting conditions, and saving images and GIFs. -* An offscreen rendering module that supports OSMesa and EGL backends. -* Shadow mapping for directional and spot lights. -* Metallic-roughness materials for physically-based rendering, including several -types of texture and normal mapping. -* Transparency. -* Depth and color image generation. - -## Sample Usage - -For sample usage, check out the [quickstart -guide](https://pyrender.readthedocs.io/en/latest/examples/index.html) or one of -the Google CoLab Notebooks: - -* [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing) - -## Viewer Keyboard and Mouse Controls - -When using the viewer, the basic controls for moving about the scene are as follows: - -* To rotate the camera about the center of the scene, hold the left mouse button and drag the cursor. -* To rotate the camera about its viewing axis, hold `CTRL` left mouse button and drag the cursor. -* To pan the camera, do one of the following: - * Hold `SHIFT`, then hold the left mouse button and drag the cursor. - * Hold the middle mouse button and drag the cursor. -* To zoom the camera in or out, do one of the following: - * Scroll the mouse wheel. - * Hold the right mouse button and drag the cursor. - -The available keyboard commands are as follows: - -* `a`: Toggles rotational animation mode. -* `c`: Toggles backface culling. -* `f`: Toggles fullscreen mode. -* `h`: Toggles shadow rendering. -* `i`: Toggles axis display mode (no axes, world axis, mesh axes, all axes). -* `l`: Toggles lighting mode (scene lighting, Raymond lighting, or direct lighting). -* `m`: Toggles face normal visualization. -* `n`: Toggles vertex normal visualization. -* `o`: Toggles orthographic camera mode. -* `q`: Quits the viewer. -* `r`: Starts recording a GIF, and pressing again stops recording and opens a file dialog. -* `s`: Opens a file dialog to save the current view as an image. -* `w`: Toggles wireframe mode (scene default, flip wireframes, all wireframe, or all solid). -* `z`: Resets the camera to the default view. - -As a note, displaying shadows significantly slows down rendering, so if you're -experiencing low framerates, just kill shadows or reduce the number of lights in -your scene. diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/runner/default_constructor.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/runner/default_constructor.py deleted file mode 100644 index 3f1f5b44168768dfda3947393a63a6cf9cf50b41..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/runner/default_constructor.py +++ /dev/null @@ -1,44 +0,0 @@ -from .builder import RUNNER_BUILDERS, RUNNERS - - -@RUNNER_BUILDERS.register_module() -class DefaultRunnerConstructor: - """Default constructor for runners. - - Custom existing `Runner` like `EpocBasedRunner` though `RunnerConstructor`. - For example, We can inject some new properties and functions for `Runner`. 
- - Example: - >>> from annotator.uniformer.mmcv.runner import RUNNER_BUILDERS, build_runner - >>> # Define a new RunnerReconstructor - >>> @RUNNER_BUILDERS.register_module() - >>> class MyRunnerConstructor: - ... def __init__(self, runner_cfg, default_args=None): - ... if not isinstance(runner_cfg, dict): - ... raise TypeError('runner_cfg should be a dict', - ... f'but got {type(runner_cfg)}') - ... self.runner_cfg = runner_cfg - ... self.default_args = default_args - ... - ... def __call__(self): - ... runner = RUNNERS.build(self.runner_cfg, - ... default_args=self.default_args) - ... # Add new properties for existing runner - ... runner.my_name = 'my_runner' - ... runner.my_function = lambda self: print(self.my_name) - ... ... - >>> # build your runner - >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40, - ... constructor='MyRunnerConstructor') - >>> runner = build_runner(runner_cfg) - """ - - def __init__(self, runner_cfg, default_args=None): - if not isinstance(runner_cfg, dict): - raise TypeError('runner_cfg should be a dict', - f'but got {type(runner_cfg)}') - self.runner_cfg = runner_cfg - self.default_args = default_args - - def __call__(self): - return RUNNERS.build(self.runner_cfg, default_args=self.default_args) diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/datasets/hrf.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/datasets/hrf.py deleted file mode 100644 index 923203b51377f9344277fc561803d7a78bd2c684..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/datasets/hrf.py +++ /dev/null @@ -1,27 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class HRFDataset(CustomDataset): - """HRF dataset. - - In segmentation map annotation for HRF, 0 stands for background, which is - included in 2 categories. ``reduce_zero_label`` is fixed to False. The - ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to - '.png'. - """ - - CLASSES = ('background', 'vessel') - - PALETTE = [[120, 120, 120], [6, 230, 230]] - - def __init__(self, **kwargs): - super(HRFDataset, self).__init__( - img_suffix='.png', - seg_map_suffix='.png', - reduce_zero_label=False, - **kwargs) - assert osp.exists(self.img_dir) diff --git a/spaces/PSLD/PSLD/stable-diffusion/run/inverse_bip.sh b/spaces/PSLD/PSLD/stable-diffusion/run/inverse_bip.sh deleted file mode 100644 index 47360fe2093d0d2e7e71da3598fc49974c2110ec..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/stable-diffusion/run/inverse_bip.sh +++ /dev/null @@ -1,11 +0,0 @@ -export CUDA_VISIBLE_DEVICES='2' -python scripts/inverse.py \ - --file_id='00478.png' \ - --task_config='configs/box_inpainting_config_psld.yaml' \ - --inpainting=1 \ - --general_inverse=0 \ - --gamma=1e-1 \ - --omega=1 \ - --outdir='outputs/psld-samples-bip' -# above gamma=1e-2 and omega=1e-1 works better for FFHQ samples -# tune for ImageNet, maybe gamma = 1e-1, omega = 1. 
TODO: Jun 22, 2023 diff --git a/spaces/PaulHilders/IEAI_CLIPGroundingExplainability/app.py b/spaces/PaulHilders/IEAI_CLIPGroundingExplainability/app.py deleted file mode 100644 index e12dc268e3f0e77fd97dd2816cc868898b3eca60..0000000000000000000000000000000000000000 --- a/spaces/PaulHilders/IEAI_CLIPGroundingExplainability/app.py +++ /dev/null @@ -1,158 +0,0 @@ -import sys -import gradio as gr - -# sys.path.append("../") -sys.path.append("CLIP_explainability/Transformer-MM-Explainability/") - -import torch -import CLIP.clip as clip - -import spacy -from PIL import Image, ImageFont, ImageDraw, ImageOps - -import os -os.system('python -m spacy download en_core_web_sm') - - -from clip_grounding.utils.image import pad_to_square -from clip_grounding.datasets.png import ( - overlay_relevance_map_on_image, -) -from CLIP_explainability.utils import interpret, show_img_heatmap, show_heatmap_on_text - -clip.clip._MODELS = { - "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", -} - -device = "cuda" if torch.cuda.is_available() else "cpu" -model, preprocess = clip.load("ViT-B/32", device=device, jit=False) - -# nlp = spacy.load("en_core_web_sm") -import en_core_web_sm -nlp = en_core_web_sm.load() - -# Gradio Section: -def run_demo(image, text): - orig_image = pad_to_square(image) - img = preprocess(orig_image).unsqueeze(0).to(device) - text_input = clip.tokenize([text]).to(device) - - R_text, R_image = interpret(model=model, image=img, texts=text_input, device=device) - - image_relevance = show_img_heatmap(R_image[0], img, orig_image=orig_image, device=device, show=False) - overlapped = overlay_relevance_map_on_image(image, image_relevance) - - text_scores, text_tokens_decoded = show_heatmap_on_text(text, text_input, R_text[0], show=False) - - highlighted_text = [] - for i, token in enumerate(text_tokens_decoded): - highlighted_text.append((str(token), float(text_scores[i]))) - - return overlapped, highlighted_text - - -# Default demo: -input_img = gr.inputs.Image(type='pil', label="Original Image") -input_txt = "text" -inputs = [input_img, input_txt] - -outputs = [gr.inputs.Image(type='pil', label="Output Image"), "highlight"] - - -description = """A demonstration based on the Generic Attention-model Explainability method for Interpreting Bi-Modal - Transformers by Chefer et al. (2021): https://github.com/hila-chefer/Transformer-MM-Explainability. -

      - This demo shows attributions scores on both the image and the text input when presenting CLIP with a - pair. Attributions are computed as Gradient-weighted Attention Rollout (Chefer et al., - 2021), and can be thought of as an estimate of the effective attention CLIP pays to its input when - computing a multimodal representation. Warning: Note that attribution - methods such as the one from this demo can only give an estimate of the real underlying behavior - of the model.""" - -iface = gr.Interface(fn=run_demo, - inputs=inputs, - outputs=outputs, - title="CLIP Grounding Explainability", - description=description, - examples=[["example_images/London.png", "London Eye"], - ["example_images/London.png", "Big Ben"], - ["example_images/harrypotter.png", "Harry"], - ["example_images/harrypotter.png", "Hermione"], - ["example_images/harrypotter.png", "Ron"], - ["example_images/Amsterdam.png", "Amsterdam canal"], - ["example_images/Amsterdam.png", "Old buildings"], - ["example_images/Amsterdam.png", "Pink flowers"], - ["example_images/dogs_on_bed.png", "Two dogs"], - ["example_images/dogs_on_bed.png", "Book"], - ["example_images/dogs_on_bed.png", "Cat"]]) - -# NER demo: -def add_label_to_img(img, label, add_entity_label=True): - img = ImageOps.expand(img, border=45, fill=(255,255,255)) - draw = ImageDraw.Draw(img) - font = ImageFont.truetype("arial.ttf", 24) - if add_entity_label: - draw.text((5,5), f"Entity: {str(label)}" , align="center", fill=(0, 0, 0), font=font) - else: - draw.text((5,5), str(label), align="center", fill=(0, 0, 0), font=font) - - return img - -def NER_demo(image, text): - # Apply NER to extract named entities, and run the explainability method - # for each named entity. - highlighed_entities = [] - for ent in nlp(text).ents: - ent_text = ent.text - ent_label = ent.label_ - highlighed_entities.append((ent_text, ent_label)) - - # As the default image, we run the default demo on the input image and text: - overlapped, highlighted_text = run_demo(image, text) - - # Then, we run the demo for each of the named entities: - gallery_images = [add_label_to_img(overlapped, "Full explanation", add_entity_label=False)] - for ent_text, ent_label in highlighed_entities: - overlapped_ent, highlighted_text_ent = run_demo(image, ent_text) - overlapped_ent_labelled = add_label_to_img(overlapped_ent, f"{str(ent_text)} ({str(ent_label)})") - - gallery_images.append(overlapped_ent_labelled) - - return highlighed_entities, gallery_images - -input_img_NER = gr.inputs.Image(type='pil', label="Original Image") -input_txt_NER = "text" -inputs_NER = [input_img_NER, input_txt_NER] - -outputs_NER = ["highlight", gr.Gallery(type='pil', label="NER Entity explanations")] - -description_NER = """Automatically generated CLIP grounding explanations for - named entities, retrieved from the spacy NER model. 
Warning: Note - that attribution methods such as the one from this demo can only give an estimate of the real - underlying behavior of the model.""" - -iface_NER = gr.Interface(fn=NER_demo, - inputs=inputs_NER, - outputs=outputs_NER, - title="Named Entity Grounding explainability using CLIP", - description=description_NER, - examples=[["example_images/London.png", "In this image we see Big Ben and the London Eye, on both sides of the river Thames."]], - cache_examples=False) - -demo_tabs = gr.TabbedInterface([iface, iface_NER], ["Default", "NER"]) - -with demo_tabs: - gr.Markdown(""" - ### Acknowledgements - This demo was developed for the Interpretability & Explainability in AI course at the University of - Amsterdam. We would like to express our thanks to Jelle Zuidema, Jaap Jumelet, Tom Kersten, Christos - Athanasiadis, Peter Heemskerk, Zhi Zhang, and all the other TAs who helped us during this course. - - --- - ### References - \[1\]: Chefer, H., Gur, S., & Wolf, L. (2021). Generic attention-model explainability for interpreting bi-modal and encoder-decoder transformers.
      - \[2\]: Abnar, S., & Zuidema, W. (2020). Quantifying attention flow in transformers. arXiv preprint arXiv:2005.00928.
      - \[3\]: [https://samiraabnar.github.io/articles/2020-04/attention_flow](https://samiraabnar.github.io/articles/2020-04/attention_flow)
      - """) -demo_tabs.launch(show_error=True) \ No newline at end of file diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/gqa.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/gqa.py deleted file mode 100644 index 03eb6e20e12d5dc2f895c87f5f9e0a5978b00a53..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/gqa.py +++ /dev/null @@ -1,91 +0,0 @@ -import json -from pathlib import Path - -import torch -import torchvision - -from .modulated_coco import ConvertCocoPolysToMask, ModulatedDataset - - -class GQADataset(ModulatedDataset): - pass - - -class GQAQuestionAnswering(torchvision.datasets.CocoDetection): - def __init__(self, img_folder, ann_file, transforms, return_masks, return_tokens, tokenizer, ann_folder): - super(GQAQuestionAnswering, self).__init__(img_folder, ann_file) - self._transforms = transforms - self.prepare = ConvertCocoPolysToMask(return_masks, return_tokens, tokenizer=tokenizer) - with open(ann_folder / "gqa_answer2id.json", "r") as f: - self.answer2id = json.load(f) - with open(ann_folder / "gqa_answer2id_by_type.json", "r") as f: - self.answer2id_by_type = json.load(f) - self.type2id = {"obj": 0, "attr": 1, "rel": 2, "global": 3, "cat": 4} - - def __getitem__(self, idx): - img, target = super(GQAQuestionAnswering, self).__getitem__(idx) - image_id = self.ids[idx] - coco_img = self.coco.loadImgs(image_id)[0] - caption = coco_img["caption"] - dataset_name = coco_img["dataset_name"] - questionId = coco_img["questionId"] - target = {"image_id": image_id, "annotations": target, "caption": caption} - img, target = self.prepare(img, target) - if self._transforms is not None: - img, target = self._transforms(img, target) - target["dataset_name"] = dataset_name - target["questionId"] = questionId - - if coco_img["answer"] not in self.answer2id: - answer = "unknown" - else: - answer = coco_img["answer"] - - target["answer"] = torch.as_tensor(self.answer2id[answer], dtype=torch.long) - target["answer_type"] = torch.as_tensor(self.type2id[coco_img["question_type"]], dtype=torch.long) - - if coco_img["answer"] not in self.answer2id_by_type["answer_attr"]: - answer = "unknown" - else: - answer = coco_img["answer"] - target["answer_attr"] = torch.as_tensor( - self.answer2id_by_type["answer_attr"][answer] if coco_img["question_type"] == "attr" else -100, - dtype=torch.long, - ) - - if coco_img["answer"] not in self.answer2id_by_type["answer_global"]: - answer = "unknown" - else: - answer = coco_img["answer"] - target["answer_global"] = torch.as_tensor( - self.answer2id_by_type["answer_global"][answer] if coco_img["question_type"] == "global" else -100, - dtype=torch.long, - ) - - if coco_img["answer"] not in self.answer2id_by_type["answer_rel"]: - answer = "unknown" - else: - answer = coco_img["answer"] - target["answer_rel"] = torch.as_tensor( - self.answer2id_by_type["answer_rel"][answer] if coco_img["question_type"] == "rel" else -100, - dtype=torch.long, - ) - - if coco_img["answer"] not in self.answer2id_by_type["answer_cat"]: - answer = "unknown" - else: - answer = coco_img["answer"] - target["answer_cat"] = torch.as_tensor( - self.answer2id_by_type["answer_cat"][answer] if coco_img["question_type"] == "cat" else -100, - dtype=torch.long, - ) - - if coco_img["answer"] not in self.answer2id_by_type["answer_obj"]: - answer = "unknown" - else: - answer = coco_img["answer"] - target["answer_obj"] = torch.as_tensor( - 
self.answer2id_by_type["answer_obj"][answer] if coco_img["question_type"] == "obj" else -100, - dtype=torch.long, - ) - return img, target diff --git a/spaces/Pluviophile/vits-uma-genshin-honkai/utils.py b/spaces/Pluviophile/vits-uma-genshin-honkai/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/Pluviophile/vits-uma-genshin-honkai/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = 
[line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/PulsarAI/huggingface-leaderboard/app.py b/spaces/PulsarAI/huggingface-leaderboard/app.py deleted file mode 100644 index c417a9647396ba6be038e9cb5d72257d4ebe18f1..0000000000000000000000000000000000000000 --- a/spaces/PulsarAI/huggingface-leaderboard/app.py +++ /dev/null @@ -1,585 +0,0 @@ -from openllm import * -import requests -import pandas as pd -from bs4 import BeautifulSoup -from tqdm import tqdm -from huggingface_hub import HfApi, CommitOperationAdd, create_commit -import gradio as gr -import os -import datetime - -api = HfApi() - - -HF_TOKEN = os.getenv('HF_TOKEN') - - -headers_models = ["🔢 Serial Number", "👤 Author Name", "📥 Total Downloads", "👍 Total Likes", "🤖 Number of Models", - "🏆 Best Model On Open LLM Leaderboard", "🥇 Best Rank On Open LLM Leaderboard", - "📊 Average Downloads per Model", "📈 Average Likes per Model", "🚀 Most Downloaded Model", - "📈 Most Download Count", "❤️ Most Liked Model", "👍 Most Like Count", "🔥 Trending Model", - "👑 Best Rank at Trending Models", "🏷️ Type"] - -headers_datasets = ["🔢 Serial Number", "👤 Author Name", "📥 Total Downloads", "👍 Total Likes", "📊 Number of Datasets", - "📊 Average Downloads per Dataset", "📈 Average Likes per Dataset", "🚀 Most Downloaded Dataset", - "📈 Most Download Count", "❤️ Most Liked Dataset", "👍 Most Like Count", "🔥 Trending Dataset", - "👑 Best Rank at Trending Datasets", "🏷️ Type"] - -headers_spaces = ["🔢 Serial Number", "👤 Author Name", "👍 Total Likes", "🚀 Number of Spaces", "📈 Average Likes per Space", - "❤️ Most Liked Space", "👍 Most Like Count", "🔥 Trending Space", "👑 Best Rank at Trending Spaces", - "🏷️ Type"] - - -def apply_headers(df, headers): - tmp = df.copy() - tmp.columns = headers - - return tmp - - -def get_time(): - return datetime.datetime.now().strftime("%d-%m-%Y %H-%M") - - -def upload_datasets(dfs): - - time = get_time() - - operations = [CommitOperationAdd(path_in_repo=f"{time}/models_df.csv", path_or_fileobj=(dfs[0].to_csv()).encode()), - CommitOperationAdd(path_in_repo=f"{time}/datasets_df.csv", path_or_fileobj=(dfs[1].to_csv()).encode()), - CommitOperationAdd(path_in_repo=f"{time}/spaces_df.csv", path_or_fileobj=(dfs[2].to_csv()).encode())] - - return (create_commit(repo_id="PulsarAI/huggingface-leaderboard-history", operations=operations, commit_message=f"Uploading history of {time}", repo_type="dataset", token=HF_TOKEN)) - - -def 
get_most(df_for_most_function): - download_sorted_df = df_for_most_function.sort_values(by=['downloads'], ascending=False) - most_downloaded = download_sorted_df.iloc[0] - - like_sorted_df = df_for_most_function.sort_values(by=['likes'], ascending=False) - most_liked = like_sorted_df.iloc[0] - - return {"Most Download": {"id": most_downloaded['id'], "downloads": most_downloaded['downloads'], - "likes": most_downloaded['likes']}, - "Most Likes": {"id": most_liked['id'], "downloads": most_liked['downloads'], "likes": most_liked['likes']}} - - -def get_sum(df_for_sum_function): - sum_downloads = sum(df_for_sum_function['downloads'].tolist()) - sum_likes = sum(df_for_sum_function['likes'].tolist()) - - return {"Downloads": sum_downloads, "Likes": sum_likes} - - -def get_openllm_leaderboard(): - try: - data = get_json_format_data() - finished_models = get_datas(data) - df = pd.DataFrame(finished_models) - return df['Model'].tolist() - except Exception as e: # something is wrong about the leaderboard so return empty list - print(e) - return [] - - -def get_ranking(model_list, target_org): - if not model_list: - return "Error on Leaderboard" - for index, model in enumerate(model_list): - if model.split("/")[0].lower() == target_org.lower(): - return [index + 1, model] - return "Not Found" - - -def get_models(which_one): - if which_one == "models": - data = api.list_models() - elif which_one == "datasets": - data = api.list_datasets() - elif which_one == "spaces": - data = api.list_spaces() - - all_list = [] - for i in tqdm(data, desc=f"Scraping {which_one}", position=0, leave=True): - i = i.__dict__ - - id = i["id"].split("/") - if len(id) != 1: - json_format_data = {"author": id[0], "id": "/".join(id), "downloads": i['downloads'], - "likes": i['likes']} if which_one != "spaces" else {"author": id[0], "id": "/".join(id), - "downloads": 0, "likes": i['likes']} - - all_list.append(json_format_data) - return all_list - - - -def search(models_dict, author_name): - return pd.DataFrame(models_dict.get(author_name, [])) - - -def group_models_by_author(all_things): - models_by_author = {} - for model in all_things: - author_name = model['author'] - if author_name not in models_by_author: - models_by_author[author_name] = [] - models_by_author[author_name].append(model) - return models_by_author - - -def make_leaderboard(orgs, users, which_one, data): - data_rows = [] - open_llm_leaderboard = get_openllm_leaderboard() if which_one == "models" else None - - trend = get_trending_list(1, which_one) - hepsi = [orgs, users] - - for index, orgs in enumerate(hepsi): - org_or_user = "Organization" if index == 0 else "User" - for org in tqdm(orgs, desc=f"Proccesing: ({which_one}) ({org_or_user})", position=0, leave=True): - rank = get_ranking_trend(trend, org) - - df = search(data, org) - - if len(df) == 0: - continue - num_things = len(df) - sum_info = get_sum(df) - most_info = get_most(df) - - if which_one == "models": - open_llm_leaderboard_get_org = get_ranking(open_llm_leaderboard, org) - - data_rows.append({ - "Author Name": org, - "Total Downloads": sum_info["Downloads"], - "Total Likes": sum_info["Likes"], - "Number of Models": num_things, - "Best Model On Open LLM Leaderboard": open_llm_leaderboard_get_org[1] if open_llm_leaderboard_get_org != "Not Found" else open_llm_leaderboard_get_org, - "Best Rank On Open LLM Leaderboard": open_llm_leaderboard_get_org[0] if open_llm_leaderboard_get_org != "Not Found" else open_llm_leaderboard_get_org, - "Average Downloads per Model": int(sum_info["Downloads"] / 
num_things) if num_things != 0 else 0, - "Average Likes per Model": int(sum_info["Likes"] / num_things) if num_things != 0 else 0, - "Most Downloaded Model": most_info["Most Download"]["id"], - "Most Download Count": most_info["Most Download"]["downloads"], - "Most Liked Model": most_info["Most Likes"]["id"], - "Most Like Count": most_info["Most Likes"]["likes"], - "Trending Model": rank['id'], - "Best Rank at Trending Models": rank['rank'], - "Type": org_or_user - }) - elif which_one == "datasets": - - data_rows.append({ - "Author Name": org, - "Total Downloads": sum_info["Downloads"], - "Total Likes": sum_info["Likes"], - "Number of Datasets": num_things, - "Average Downloads per Dataset": int(sum_info["Downloads"] / num_things) if num_things != 0 else 0, - "Average Likes per Dataset": int(sum_info["Likes"] / num_things) if num_things != 0 else 0, - "Most Downloaded Dataset": most_info["Most Download"]["id"], - "Most Download Count": most_info["Most Download"]["downloads"], - "Most Liked Dataset": most_info["Most Likes"]["id"], - "Most Like Count": most_info["Most Likes"]["likes"], - "Trending Dataset": rank['id'], - "Best Rank at Trending Datasets": rank['rank'], - "Type": org_or_user - }) - - elif which_one == "spaces": - - data_rows.append({ - "Author Name": org, - "Total Likes": sum_info["Likes"], - "Number of Spaces": num_things, - "Average Likes per Space": int(sum_info["Likes"] / num_things) if num_things != 0 else 0, - "Most Liked Space": most_info["Most Likes"]["id"], - "Most Like Count": most_info["Most Likes"]["likes"], - "Trending Space": rank['id'], - "Best Rank at Trending Spaces": rank['rank'], - "Type": org_or_user - }) - - leaderboard = pd.DataFrame(data_rows) - temp = ["Total Downloads"] if which_one != "spaces" else ["Total Likes"] - - leaderboard = leaderboard.sort_values(by=temp, ascending=False) - leaderboard.insert(0, "Serial Number", range(1, len(leaderboard) + 1)) - return leaderboard - - -def clickable(x, which_one): - if which_one == "models": - if x != "Not Found": - return f'{x}' - else: - return "Not Found" - else: - if x != "Not Found": - return f'{x}' - return "Not Found" - - -def models_df_to_clickable(df, columns, which_one): - for column in columns: - if column == "Author Name": - df[column] = df[column].apply(lambda x: clickable(x, "models")) - else: - df[column] = df[column].apply(lambda x: clickable(x, which_one)) - return df - - -def get_trending_list(pages, which_one): - trending_list = [] - for i in range(pages): - json_data = requests.get(f"https://huggingface.co/{which_one}-json?p={i}").json() - - for thing in json_data[which_one]: - id = thing["id"] - likes = thing["likes"] - - if which_one != "spaces": - downloads = thing["downloads"] - - trending_list.append({"id": id, "downloads": downloads, "likes": likes}) - else: - trending_list.append({"id": id, "likes": likes}) - - return trending_list - - -def get_ranking_trend(json_data, org_name): - names = [item['id'].split("/")[0] for item in json_data] - models = [item['id'] for item in json_data] - if org_name in names: - temp = names.index(org_name) - return {"id": models[temp], "rank": temp + 1} - else: - return {"id": "Not Found", "rank": "Not Found"} - - - -def fetch_data_from_url(url): - response = requests.get(url) - if response.status_code == 200: - data = response.text.splitlines() - return [line.rstrip("\n") for line in data] - else: - print(f"Failed to fetch data from URL: {url}") - return [] - -user_names_url = 
"https://huggingface.co/datasets/PulsarAI/user-orgs-huggingface-leaderboard/raw/main/user_names.txt" -org_names_url = "https://huggingface.co/datasets/PulsarAI/user-orgs-huggingface-leaderboard/raw/main/org_names.txt" - -user_names_in_list = fetch_data_from_url(user_names_url) -org_names_in_list = fetch_data_from_url(org_names_url) - -datetime_now = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) -INTRODUCTION_TEXT = f""" -🎯 The Leaderboard aims to track users and organizations rankings and stats. This space is inspired by the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). - -## Available Dataframes: - -- 🏛️ Models - -- 📊 Datasets - -- 🚀 Spaces - -## Backend - -🛠️ The leaderboard's backend mainly runs on the [Hugging Face Hub API](https://huggingface.co/docs/huggingface_hub/v0.5.1/en/package_reference/hf_api). - -📒 **Note:** In the model's dataframe, there are some columns related to the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). This data is also retrieved through web scraping. - -📒 **Note:** In trending models/datasets/spaces, first 300 models/datasets/spaces is being retrieved from huggingface. - -## 🔍 Searching Organizations and Users - -You can search for organizations and users in the Search tab. In this tab, you can view an author's stats even if they are not at the top of the leaderboard. - -## Filtering Organizations and Users - -🧮 You can filter the dataset to show only Organizations or Users! - -✅ Use checkboxs for this! - -## Last Update - -⌛ This space is last updated in **{datetime_now}**. -""" - - - -def get_avatar(user_name, user): - try: - url = f"https://huggingface.co/{user_name}" - response = requests.get(url) - soup = BeautifulSoup(response.text, "html.parser") - if user: - - avatar = soup.find("img", {"class": "h-32 w-32 overflow-hidden rounded-full shadow-inner lg:h-48 lg:w-48"})['src'] - full = soup.find("span", {"class": "mr-3 leading-6"}).text - return [avatar, full] - - else: - - avatar = soup.find("img", {"class": "mb-2 mr-4 h-12 w-12 flex-none overflow-hidden rounded-lg sm:mb-0 sm:h-20 sm:w-20"})['src'] - full = soup.find("h1", {"class": "mb-2 mr-3 text-2xl font-bold md:mb-0"}).text - return [avatar, full] - except Exception as e: - print(e) - return "Error" - - -def update_table(orgs, users, how_much=400, return_all=False): - dataFrame = models_df - - if not orgs and users: - filtered_df = dataFrame[(dataFrame['Type'] != 'Organization') | (dataFrame['Type'] == 'User')] - - elif orgs and not users: - filtered_df = dataFrame[(dataFrame['Type'] == 'Organization') | (dataFrame['Type'] != 'User')] - - elif orgs and users: - filtered_df = dataFrame[(dataFrame['Type'] == 'Organization') | (dataFrame['Type'] == 'User')] - - else: - return apply_headers(dataFrame.head(0), headers_models) - - if return_all: - return apply_headers(filtered_df, headers_models) - else: - return apply_headers(filtered_df, headers_models).head(how_much) - - -def update_table_datasets(orgs, users, how_much=250, return_all=False): - dataFrame = dataset_df - - if not orgs and users: - filtered_df = dataFrame[(dataFrame['Type'] != 'Organization') | (dataFrame['Type'] == 'User')] - - elif orgs and not users: - filtered_df = dataFrame[(dataFrame['Type'] == 'Organization') | (dataFrame['Type'] != 'User')] - - elif orgs and users: - filtered_df = dataFrame[(dataFrame['Type'] == 'Organization') | (dataFrame['Type'] == 'User')] - - else: - return apply_headers(dataFrame, headers_datasets).head(0) - - if 
return_all: - return apply_headers(filtered_df, headers_datasets) - else: - return apply_headers(filtered_df, headers_datasets).head(how_much) - - -def update_table_spaces(orgs, users, how_much=200, return_all=False): - dataFrame = spaces_df - - if not orgs and users: - filtered_df = dataFrame[(dataFrame['Type'] != 'Organization') | (dataFrame['Type'] == 'User')] - - elif orgs and not users: - filtered_df = dataFrame[(dataFrame['Type'] == 'Organization') | (dataFrame['Type'] != 'User')] - - elif orgs and users: - filtered_df = dataFrame[(dataFrame['Type'] == 'Organization') | (dataFrame['Type'] == 'User')] - - else: - return apply_headers(dataFrame, headers_spaces).head(0) - - if return_all: - return apply_headers(filtered_df, headers_spaces) - else: - return apply_headers(filtered_df, headers_spaces).head(how_much) - - - -def search_df(author): - sonuc_models, sonuc_datasets, sonuc_spaces =[], [], [] - org_or_user = "User" if author in user_names_in_list else "Org" - - a = get_avatar(author, True if org_or_user=="User" else False) - - if a == "Error": - return "Error happened, maybe author name is not valid." - - # Search in models_df - df = models_df - for index, item in enumerate(df['Author Name'].tolist()): - if f'"https://huggingface.co/{author}"' in item: - sonuc_models = df.iloc[index] - break # Break out of the loop once a match is found - - # Search in dataset_df - df = dataset_df - for index, item in enumerate(df['Author Name'].tolist()): - if f'"https://huggingface.co/{author}"' in item: - sonuc_datasets = df.iloc[index] - break # Break out of the loop once a match is found - - # Search in spaces_df - df = spaces_df - for index, item in enumerate(df['Author Name'].tolist()): - if f'"https://huggingface.co/{author}"' in item: - sonuc_spaces = df.iloc[index] - break # Break out of the loop once a match is found - - - - author_name = sonuc_models['Author Name'] if len(sonuc_models) > 0 else "Not Found" - global_rank = sonuc_models['Serial Number'] if len(sonuc_models) > 0 else "Not Found" - - if len(sonuc_models) > 0: - if org_or_user == "User": - user_rank = filtered_model_users.index(f'{author}') - else: - user_rank = filtered_model_orgs.index(f'{author}') - else: - user_rank = "Not Found" - - global_datasets = sonuc_datasets['Serial Number'] if len(sonuc_datasets) > 0 else "Not Found" - - if len(sonuc_datasets) > 0: - if org_or_user == "User": - user_datasets = filtered_datasets_users.index(f'{author}') - else: - user_datasets = filtered_datasets_orgs.index(f'{author}') - else: - user_datasets = "Not Found" - - - global_spaces = sonuc_spaces['Serial Number'] if len(sonuc_spaces) > 0 else "Not Found" - - if len(sonuc_spaces) > 0: - if org_or_user == "User": - user_spaces = filtered_spaces_users.index(f'{author}') - else: - user_spaces = filtered_spaces_orgs.index(f'{author}') - else: - user_spaces = "Not Found" - - total_model_downloads = sonuc_models['Total Downloads'] if len(sonuc_models) > 0 else "Not Found" - total_model_likes = sonuc_models['Total Likes'] if len(sonuc_models) > 0 else "Not Found" - model_count = sonuc_models['Number of Models'] if len(sonuc_models) > 0 else "Not Found" - total_dataset_downloads = sonuc_datasets['Total Downloads'] if len(sonuc_datasets) > 0 else "Not Found" - total_dataset_likes = sonuc_datasets['Total Likes'] if len(sonuc_datasets) > 0 else "Not Found" - dataset_count = sonuc_datasets['Number of Datasets'] if len(sonuc_datasets) > 0 else "Not Found" - total_space_likes = sonuc_spaces['Total Likes'] if len(sonuc_spaces) > 0 else "Not Found" - 
space_count = sonuc_spaces['Number of Spaces'] if len(sonuc_spaces) > 0 else "Not Found" - - - - - markdown_text = f''' - -

      {author_name} ({a[1]})

      - - ## 🏆 Ranks - - Global: {global_rank} - - Models in author's category: {user_rank} - - Datasets (global): {global_datasets} - - Datasets in author's category: {user_datasets} - - Spaces (global): {global_spaces} - - Spaces in author's category: {user_spaces} - - ## 🤖 Models - - Total downloads: {total_model_downloads} - - Total Likes: {total_model_likes} - - Model count: {model_count} - - ## 📊 Datasets - - Total downloads: {total_dataset_downloads} - - Total Likes: {total_dataset_likes} - - Dataset count: {dataset_count} - - ## 🚀 Spaces - - Total Likes: {total_space_likes} - - Spaces count: {space_count} - ''' - - return markdown_text - - -with gr.Blocks() as demo: - gr.Markdown("""

      🤗 Huggingface Leaderboard

      """) - gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text") - - all_models = get_models("models") - all_datasets = get_models("datasets") - all_spaces = get_models("spaces") - - with gr.Column(min_width=320): - with gr.Box(): - orgs = gr.Checkbox(value=True, label="Show Organizations", interactive=True) - users = gr.Checkbox(value=True, label="Show users", interactive=True) - - with gr.TabItem("🏛️ Models", id=1): - columns_to_convert = ["Author Name", "Best Model On Open LLM Leaderboard", "Most Downloaded Model", - "Most Liked Model", "Trending Model"] - models_df = make_leaderboard(org_names_in_list, user_names_in_list, "models", group_models_by_author(all_models)) - models_df = models_df_to_clickable(models_df, columns_to_convert, "models") - - gr_models = gr.Dataframe(apply_headers(models_df, headers_models).head(400), headers=headers_models, interactive=True, - datatype=["str", "markdown", "str", "str", "str", "markdown", "str", "str", "str", - "markdown", "str", "markdown", "str", "markdown", "str", "str"]) - - with gr.TabItem("📊 Datasets", id=2): - columns_to_convert = ["Author Name", "Most Downloaded Dataset", "Most Liked Dataset", "Trending Dataset"] - dataset_df = make_leaderboard(org_names_in_list, user_names_in_list, "datasets", group_models_by_author(all_datasets)) - dataset_df = models_df_to_clickable(dataset_df, columns_to_convert, "datasets") - - gr_datasets = gr.Dataframe(apply_headers(dataset_df, headers_datasets).head(250), headers=headers_datasets, interactive=False, - datatype=["str", "markdown", "str", "str", "str", "str", "str", "markdown", "str", - "markdown", "str", "markdown", "str", "str"]) - - with gr.TabItem("🚀 Spaces", id=3): - columns_to_convert = ["Author Name", "Most Liked Space", "Trending Space"] - - spaces_df = make_leaderboard(org_names_in_list, user_names_in_list, "spaces", group_models_by_author(all_spaces)) - spaces_df = models_df_to_clickable(spaces_df, columns_to_convert, "spaces") - - gr_spaces = gr.Dataframe(apply_headers(spaces_df, headers_spaces).head(200), headers=headers_spaces, interactive=False, - datatype=["str", "markdown", "str", "str", "str", "markdown", "str", "markdown", "str", - "str"]) - - - with gr.TabItem("🔍 Search", id=4): - with gr.Column(min_width=320): - search_bar = gr.Textbox( - placeholder=" 🔍 Search for your author and press ENTER", - show_label=False) - run_btn = gr.Button("Show stats for author") - yazi = gr.Markdown() - run_btn.click(fn=search_df, inputs=search_bar, outputs=yazi) - search_bar.submit(fn=search_df, inputs=search_bar, outputs=yazi) - - - commit = upload_datasets([models_df, dataset_df, spaces_df]) - print(commit) - - orgs.change(fn=update_table, inputs=[orgs, users], outputs=gr_models) - - orgs.change(fn=update_table_datasets, inputs=[orgs, users], outputs=gr_datasets) - - orgs.change(fn=update_table_spaces, inputs=[orgs, users], outputs=gr_spaces) - - users.change(fn=update_table, inputs=[orgs, users], outputs=gr_models) - - users.change(fn=update_table_datasets, inputs=[orgs, users], outputs=gr_datasets) - - users.change(fn=update_table_spaces, inputs=[orgs, users], outputs=gr_spaces) - - -filtered_model_users = update_table(orgs=False, users=True, return_all=True)['👤 Author Name'].tolist() -filtered_model_orgs = update_table(orgs=True, users=False, return_all=True)['👤 Author Name'].tolist() - -filtered_datasets_users = update_table_datasets(orgs=False, users=True, return_all=True)['👤 Author Name'].tolist() -filtered_datasets_orgs = update_table_datasets(orgs=True, users=False, 
return_all=True)['👤 Author Name'].tolist() - -filtered_spaces_users = update_table_spaces(orgs=False, users=True, return_all=True)['👤 Author Name'].tolist() -filtered_spaces_orgs = update_table_spaces(orgs=True, users=False, return_all=True)['👤 Author Name'].tolist() - -demo.launch(debug=True) - diff --git a/spaces/Purple11/Grounded-Diffusion/ldm/modules/distributions/__init__.py b/spaces/Purple11/Grounded-Diffusion/ldm/modules/distributions/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/RMXK/RVC_HFF/infer/lib/infer_pack/commons.py b/spaces/RMXK/RVC_HFF/infer/lib/infer_pack/commons.py deleted file mode 100644 index ccd334b7320543b0c3a2166f82093564c9721317..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/infer/lib/infer_pack/commons.py +++ /dev/null @@ -1,167 +0,0 @@ -import math - -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, 
max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/RTL/videomatch/videomatch.py b/spaces/RTL/videomatch/videomatch.py deleted file mode 100644 index 98f23d06d9d159676f5a4b1162e689a8cfca3766..0000000000000000000000000000000000000000 --- a/spaces/RTL/videomatch/videomatch.py +++ /dev/null @@ -1,228 +0,0 @@ -import os -import logging -import faiss - -from kats.detectors.cusum_detection import CUSUMDetector -from kats.detectors.robust_stat_detection import RobustStatDetector -from kats.consts import TimeSeriesData - -from scipy import stats as st - -import numpy as np -import pandas as pd - -from videohash import compute_hashes, filepath_from_url -from config import FPS, ROLLING_WINDOW_SIZE - -def index_hashes_for_video(url: str) -> faiss.IndexBinaryIVF: - """ Compute hashes of a video and index the video using faiss indices and return the index. - - Args: - url (str): url to to compute hashes for and index. - - Returns: - index (IndexBinaryIVF): an abstract structure for a FAISS-based binary index of the hashes. - - """ - # If the url already had indices created, fetch those. 
- filepath = filepath_from_url(url) - if os.path.exists(f'{filepath}.index'): - logging.info(f"Loading indexed hashes from {filepath}.index") - binary_index = faiss.read_index_binary(f'{filepath}.index') - logging.info(f"Index {filepath}.index has in total {binary_index.ntotal} frames") - return binary_index - - # Create hash vectors for url by looping over hashes from the video. - hash_vectors = np.array([x['hash'] for x in compute_hashes(url)]) - logging.info(f"Computed hashes for {hash_vectors.shape} frames.") - - # Initializing the quantizer. - quantizer = faiss.IndexBinaryFlat(hash_vectors.shape[1]*8) - - # Initializing index. - index = faiss.IndexBinaryIVF(quantizer, hash_vectors.shape[1]*8, min(16, hash_vectors.shape[0])) - index.nprobe = 1 # Nr of nearest clusters to be searched per query. - - # Training and write the quantizer. - index.train(hash_vectors) - index.add(hash_vectors) - faiss.write_index_binary(index, f'{filepath}.index') - logging.info(f"Indexed hashes for {index.ntotal} frames to {filepath}.index.") - - return index - -def get_video_index(url: str): - """" Builds up a FAISS index for a video. - - Args: - filepath (str): Location of the source video (video that is to be indexed) - - Returns: - video_index (IndexBinaryIVF): an abstract structure for a FAISS-based binary index of the hashes. - hash_vectors (ndarray): vector of the indexed frames that can be searched - - """ - video_index = index_hashes_for_video(url) - - # Make sure the index is indexable - video_index.make_direct_map() - - # Retrieve original indices - hash_vectors = np.array([video_index.reconstruct(i) for i in range(video_index.ntotal)]) - return video_index, hash_vectors - -def compare_videos(hash_vectors, target_index, MIN_DISTANCE = 3): - """ The comparison between the target and the original video will be plotted based - on the matches between the target and the original video over time. The matches are determined - based on the minimum distance between hashes (as computed by faiss-vectors) before they're considered a match. - - The results are returned as a triplet of 1D arrays: - lims, D, I, where result for query i is in I[lims[i]:lims[i+1]] - (indices of neighbors), D[lims[i]:lims[i+1]] (distances). - (See: https://github.com/facebookresearch/faiss/wiki/Special-operations-on-indexes) - - Args: - hash_vectors (ndarray): vector of the indexed frames that can be searched. - target_index (IndexBinaryIVF): an abstract structure for a FAISS-based binary index of the hashes. - MIN_DISTANCE (int): minium distance for a match - - Returns: - lims (ndarray): from where to where in I and D the result for query i is - D (ndarray): distances of the vectors within a radius around the query point - I (ndarray): indices of the neighbours - hash_vectors (ndarray): vector of the indexed frames that can be searched. - - """ - lims, D, I = target_index.range_search(hash_vectors, MIN_DISTANCE) - return lims, D, I, hash_vectors - -def get_decent_distance(video_index, hash_vectors, target_index, MIN_DISTANCE, MAX_DISTANCE): - """ To get a decent heurstic for a base distance check every distance from MIN_DISTANCE to MAX_DISTANCE - until the number of matches found is equal to or higher than the number of frames in the source video. - If the number of matches with a certain video is larger than the amount of frames, we set the distance heuristic. 
- This was emperically determined to be a decent heuristic to find the distance heuristic - - Args: - video_index (IndexBinaryIVF): The index of the source video - hash_vectors (ndarray): The hash vectors of the target video - target_index (IndexBinaryIVF): The index of the target video - MIN_DISTANCE (int): Minimum distance between vectors to be considered a match. - MAX_DISTANCE (int): Maximum distance between vectors to prevent bad matches. - - Returns: - None if not distance is found, otherwise an integer representing the heuristic distance value. - - """ - # Go over every distance with a step size of 2, since the distance increases/decreases with that step size - for distance in np.arange(start = MIN_DISTANCE - 2, stop = MAX_DISTANCE + 2, step = 2, dtype=int): - distance = int(distance) # Cast for safety - _, D, _, _ = compare_videos(hash_vectors, target_index, MIN_DISTANCE = distance) - nr_source_frames = video_index.ntotal - nr_matches = len(D) - if nr_matches > 0: - logging.info(f"{(nr_matches/nr_source_frames) * 100.0:.1f}% of frames have a match for distance '{distance}' ({nr_matches} matches for {nr_source_frames} frames)") - if nr_matches >= nr_source_frames: - return distance - logging.warning(f"No matches found for any distance between {MIN_DISTANCE} and {MAX_DISTANCE}") - - return None - -def get_change_points(df, smoothing_window_size=10, method='ROBUST', metric="ROLL_OFFSET_MODE"): - """Using https://github.com/facebookresearch/Kats to analyze the data to find points where the metric - changes. - - Args: - df (DataFrame): Dataframe holding the information between the matching of two videos - smoothing_window_size (int): Smoothing window for the timeseries analysis. Defaults to 10. - method (str): Method for the timeseries analyis. Defaults to 'ROBUST'. - metric (str): Main reporting metric for the timeseries analysis. Defaults to "ROLL_OFFSET_MODE". - - Returns: - change_points [TimeSeriesChangePoint]: Array of time series change point objects. - - """ - # Convert the df to how kats wants it - tsd = TimeSeriesData(df.loc[:,['time', metric]]) - - # Depending on the method get the change points - if method.upper() == "CUSUM": - detector = CUSUMDetector(tsd) - elif method.upper() == "ROBUST": - detector = RobustStatDetector(tsd) - change_points = detector.detector(smoothing_window_size=smoothing_window_size, comparison_window=-2) - - # Log some statistics - if method.upper() == "CUSUM" and change_points != []: - mean_offset_prechange = change_points[0].mu0 - mean_offset_postchange = change_points[0].mu1 - jump_s = mean_offset_postchange - mean_offset_prechange - logging.info(f"Video jumps {jump_s:.1f}s in time at {mean_offset_prechange:.1f} seconds") - - return change_points - -def get_videomatch_df(lims, D, I, hash_vectors, distance, window_size=ROLLING_WINDOW_SIZE, vanilla_df=False): - """Get the dataframe holding all information of the comparison between two videos. - - Args: - lims (ndarray): from where to where in I and D the result for query i is - D (ndarray): distances of the vectors within a radius around the query point - I (ndarray): indices of the neighbours - hash_vectors (ndarray): vector of the indexed frames that can be searched. - distance (int): heuristic distance to use for the search for most accurate matches. - window_size (int): Rolling window size that is used when calculating the mode. Defaults to ROLLING_WINDOW_SIZE. - vanilla_df: Toggle for returning other baseline dataframe. Defaults to False. 
- - Returns: - df (DataFrame): Dataframe with extra information added about decision making regarding the match between videos. - - """ - # Get match locations in seconds - target = [(lims[i+1]-lims[i]) * [i] for i in range(hash_vectors.shape[0])] - target_s = [i/FPS for j in target for i in j] - source_s = [i/FPS for i in I] - - # Make dataframe - df = pd.DataFrame(zip(target_s, source_s, D, I), columns = ['TARGET_S', 'SOURCE_S', 'DISTANCE', 'INDICES']) - if vanilla_df: - return df - - # Weight values by distance of their match - df['TARGET_WEIGHT'] = 1 - df['DISTANCE']/distance # Higher value means a better match - df['SOURCE_WEIGHTED_VALUE'] = df['SOURCE_S'] * df['TARGET_WEIGHT'] # Multiply the weight (which indicates a better match) with the value for Y and aggregate to get a less noisy estimate of Y - - # Group by X so for every second/x there will be 1 source value in the end - grouped_X = df.groupby('TARGET_S').agg({'SOURCE_WEIGHTED_VALUE' : 'sum', 'TARGET_WEIGHT' : 'sum'}) - grouped_X['FINAL_SOURCE_VALUE'] = grouped_X['SOURCE_WEIGHTED_VALUE'] / grouped_X['TARGET_WEIGHT'] - - # Remake the dataframe - df = grouped_X.reset_index() - df = df.drop(columns=['SOURCE_WEIGHTED_VALUE', 'TARGET_WEIGHT']) - df = df.rename({'FINAL_SOURCE_VALUE' : 'SOURCE_S'}, axis='columns') - - # Add NAN to "missing" x values - step_size = 1/FPS - x_complete = np.round(np.arange(start=0.0, stop = max(df['TARGET_S'])+step_size, step = step_size), 1) # More robust - df['TARGET_S'] = np.round(df['TARGET_S'], 1) - df_complete = pd.DataFrame(x_complete, columns=['TARGET_S']) - - # Merge dataframes to get NAN values for every missing SOURCE_S - df = df_complete.merge(df, on='TARGET_S', how='left') - - # Interpolate between frames since there are missing values - df['SOURCE_LIP_S'] = df['SOURCE_S'].interpolate(method='linear', limit_direction='both', axis=0) - - # Add timeshift col and timeshift col with Linearly Interpolated Values (LIP) - df['TIMESHIFT'] = df['SOURCE_S'].shift(1) - df['SOURCE_S'] - df['TIMESHIFT_LIP'] = df['SOURCE_LIP_S'].shift(1) - df['SOURCE_LIP_S'] - - # Add offset col that assumes the video is played at the same speed as the other to do a "timeshift" - df['OFFSET'] = df['SOURCE_S'] - df['TARGET_S'] - np.min(df['SOURCE_S']) - df['OFFSET_LIP'] = df['SOURCE_LIP_S'] - df['TARGET_S'] - np.min(df['SOURCE_LIP_S']) - - # Add rolling window mode - df['ROLL_OFFSET_MODE'] = np.round(df['OFFSET_LIP'], 0).rolling(window_size, center=True, min_periods=1).apply(lambda x: st.mode(x)[0]) - - # Add time column for plotting - df['time'] = pd.to_datetime(df["TARGET_S"], unit='s') # Needs a datetime as input - - return df \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_loop.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_loop.py deleted file mode 100644 index 01c6cafbe53f1fcb12f7b382b2b35e2fd2c69933..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_loop.py +++ /dev/null @@ -1,43 +0,0 @@ -from typing import Iterable, Tuple, TypeVar - -T = TypeVar("T") - - -def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: - """Iterate and generate a tuple with a flag for first value.""" - iter_values = iter(values) - try: - value = next(iter_values) - except StopIteration: - return - yield True, value - for value in iter_values: - yield False, value - - -def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: - """Iterate and generate 
a tuple with a flag for last value.""" - iter_values = iter(values) - try: - previous_value = next(iter_values) - except StopIteration: - return - for value in iter_values: - yield False, previous_value - previous_value = value - yield True, previous_value - - -def loop_first_last(values: Iterable[T]) -> Iterable[Tuple[bool, bool, T]]: - """Iterate and generate a tuple with a flag for first and last value.""" - iter_values = iter(values) - try: - previous_value = next(iter_values) - except StopIteration: - return - first = True - for value in iter_values: - yield first, False, previous_value - first = False - previous_value = value - yield first, True, previous_value diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/command/check.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/command/check.py deleted file mode 100644 index 539481c946043c53aa61bd62cfd4b4146934697d..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/command/check.py +++ /dev/null @@ -1,151 +0,0 @@ -"""distutils.command.check - -Implements the Distutils 'check' command. -""" -import contextlib - -from distutils.core import Command -from distutils.errors import DistutilsSetupError - -with contextlib.suppress(ImportError): - import docutils.utils - import docutils.parsers.rst - import docutils.frontend - import docutils.nodes - - class SilentReporter(docutils.utils.Reporter): - def __init__( - self, - source, - report_level, - halt_level, - stream=None, - debug=0, - encoding='ascii', - error_handler='replace', - ): - self.messages = [] - super().__init__( - source, report_level, halt_level, stream, debug, encoding, error_handler - ) - - def system_message(self, level, message, *children, **kwargs): - self.messages.append((level, message, children, kwargs)) - return docutils.nodes.system_message( - message, level=level, type=self.levels[level], *children, **kwargs - ) - - -class check(Command): - """This command checks the meta-data of the package.""" - - description = "perform some checks on the package" - user_options = [ - ('metadata', 'm', 'Verify meta-data'), - ( - 'restructuredtext', - 'r', - ( - 'Checks if long string meta-data syntax ' - 'are reStructuredText-compliant' - ), - ), - ('strict', 's', 'Will exit with an error if a check fails'), - ] - - boolean_options = ['metadata', 'restructuredtext', 'strict'] - - def initialize_options(self): - """Sets default values for options.""" - self.restructuredtext = 0 - self.metadata = 1 - self.strict = 0 - self._warnings = 0 - - def finalize_options(self): - pass - - def warn(self, msg): - """Counts the number of warnings that occurs.""" - self._warnings += 1 - return Command.warn(self, msg) - - def run(self): - """Runs the command.""" - # perform the various tests - if self.metadata: - self.check_metadata() - if self.restructuredtext: - if 'docutils' in globals(): - try: - self.check_restructuredtext() - except TypeError as exc: - raise DistutilsSetupError(str(exc)) - elif self.strict: - raise DistutilsSetupError('The docutils package is needed.') - - # let's raise an error in strict mode, if we have at least - # one warning - if self.strict and self._warnings > 0: - raise DistutilsSetupError('Please correct your package.') - - def check_metadata(self): - """Ensures that all required elements of meta-data are supplied. - - Required fields: - name, version - - Warns if any are missing. 
- """ - metadata = self.distribution.metadata - - missing = [] - for attr in 'name', 'version': - if not getattr(metadata, attr, None): - missing.append(attr) - - if missing: - self.warn("missing required meta-data: %s" % ', '.join(missing)) - - def check_restructuredtext(self): - """Checks if the long string fields are reST-compliant.""" - data = self.distribution.get_long_description() - for warning in self._check_rst_data(data): - line = warning[-1].get('line') - if line is None: - warning = warning[1] - else: - warning = '{} (line {})'.format(warning[1], line) - self.warn(warning) - - def _check_rst_data(self, data): - """Returns warnings when the provided data doesn't compile.""" - # the include and csv_table directives need this to be a path - source_path = self.distribution.script_name or 'setup.py' - parser = docutils.parsers.rst.Parser() - settings = docutils.frontend.OptionParser( - components=(docutils.parsers.rst.Parser,) - ).get_default_values() - settings.tab_width = 4 - settings.pep_references = None - settings.rfc_references = None - reporter = SilentReporter( - source_path, - settings.report_level, - settings.halt_level, - stream=settings.warning_stream, - debug=settings.debug, - encoding=settings.error_encoding, - error_handler=settings.error_encoding_error_handler, - ) - - document = docutils.nodes.document(settings, reporter, source=source_path) - document.note_source(source_path, -1) - try: - parser.parse(data, document) - except AttributeError as e: - reporter.messages.append( - (-1, 'Could not finish the parsing: %s.' % e, '', {}) - ) - - return reporter.messages diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/readers.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/readers.py deleted file mode 100644 index f1190ca452a1ce22ee9a1b304991d475281df8ca..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/readers.py +++ /dev/null @@ -1,122 +0,0 @@ -import collections -import pathlib -import operator - -from . import abc - -from ._itertools import unique_everseen -from ._compat import ZipPath - - -def remove_duplicates(items): - return iter(collections.OrderedDict.fromkeys(items)) - - -class FileReader(abc.TraversableResources): - def __init__(self, loader): - self.path = pathlib.Path(loader.path).parent - - def resource_path(self, resource): - """ - Return the file system path to prevent - `resources.path()` from creating a temporary - copy. - """ - return str(self.path.joinpath(resource)) - - def files(self): - return self.path - - -class ZipReader(abc.TraversableResources): - def __init__(self, loader, module): - _, _, name = module.rpartition('.') - self.prefix = loader.prefix.replace('\\', '/') + name + '/' - self.archive = loader.archive - - def open_resource(self, resource): - try: - return super().open_resource(resource) - except KeyError as exc: - raise FileNotFoundError(exc.args[0]) - - def is_resource(self, path): - # workaround for `zipfile.Path.is_file` returning true - # for non-existent paths. - target = self.files().joinpath(path) - return target.is_file() and target.exists() - - def files(self): - return ZipPath(self.archive, self.prefix) - - -class MultiplexedPath(abc.Traversable): - """ - Given a series of Traversable objects, implement a merged - version of the interface across all objects. 
Useful for - namespace packages which may be multihomed at a single - name. - """ - - def __init__(self, *paths): - self._paths = list(map(pathlib.Path, remove_duplicates(paths))) - if not self._paths: - message = 'MultiplexedPath must contain at least one path' - raise FileNotFoundError(message) - if not all(path.is_dir() for path in self._paths): - raise NotADirectoryError('MultiplexedPath only supports directories') - - def iterdir(self): - files = (file for path in self._paths for file in path.iterdir()) - return unique_everseen(files, key=operator.attrgetter('name')) - - def read_bytes(self): - raise FileNotFoundError(f'{self} is not a file') - - def read_text(self, *args, **kwargs): - raise FileNotFoundError(f'{self} is not a file') - - def is_dir(self): - return True - - def is_file(self): - return False - - def joinpath(self, child): - # first try to find child in current paths - for file in self.iterdir(): - if file.name == child: - return file - # if it does not exist, construct it with the first path - return self._paths[0] / child - - __truediv__ = joinpath - - def open(self, *args, **kwargs): - raise FileNotFoundError(f'{self} is not a file') - - @property - def name(self): - return self._paths[0].name - - def __repr__(self): - paths = ', '.join(f"'{path}'" for path in self._paths) - return f'MultiplexedPath({paths})' - - -class NamespaceReader(abc.TraversableResources): - def __init__(self, namespace_path): - if 'NamespacePath' not in str(namespace_path): - raise ValueError('Invalid path') - self.path = MultiplexedPath(*list(namespace_path)) - - def resource_path(self, resource): - """ - Return the file system path to prevent - `resources.path()` from creating a temporary - copy. - """ - return str(self.path.joinpath(resource)) - - def files(self): - return self.path diff --git a/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/__init__.py b/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/__init__.py deleted file mode 100644 index aa7ba68e1b8fa7c7854ca49680c07d54d468d83e..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from yacs.config import CfgNode -from .config.default import _CN - - -def lower_config(yacs_cfg): - if not isinstance(yacs_cfg, CfgNode): - return yacs_cfg - return {k.lower(): lower_config(v) for k, v in yacs_cfg.items()} - - -def get_model_cfg(): - cfg = lower_config(lower_config(_CN)) - return cfg["model"] diff --git a/spaces/Realcat/image-matching-webui/third_party/TopicFM/train.py b/spaces/Realcat/image-matching-webui/third_party/TopicFM/train.py deleted file mode 100644 index 9188b80a3fb407f4871b8147a2c90fa382380e25..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/TopicFM/train.py +++ /dev/null @@ -1,144 +0,0 @@ -import math -import argparse -import pprint -from distutils.util import strtobool -from pathlib import Path -from loguru import logger as loguru_logger - -import pytorch_lightning as pl -from pytorch_lightning.utilities import rank_zero_only -from pytorch_lightning.loggers import TensorBoardLogger -from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor -from pytorch_lightning.plugins import DDPPlugin - -from src.config.default import get_cfg_defaults -from src.utils.misc import get_rank_zero_only_logger, setup_gpus -from src.utils.profiler import build_profiler -from src.lightning_trainer.data import MultiSceneDataModule -from 
src.lightning_trainer.trainer import PL_Trainer - -loguru_logger = get_rank_zero_only_logger(loguru_logger) - - -def parse_args(): - # init a costum parser which will be added into pl.Trainer parser - # check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("data_cfg_path", type=str, help="data config path") - parser.add_argument("main_cfg_path", type=str, help="main config path") - parser.add_argument("--exp_name", type=str, default="default_exp_name") - parser.add_argument("--batch_size", type=int, default=4, help="batch_size per gpu") - parser.add_argument("--num_workers", type=int, default=4) - parser.add_argument( - "--pin_memory", - type=lambda x: bool(strtobool(x)), - nargs="?", - default=True, - help="whether loading data to pinned memory or not", - ) - parser.add_argument( - "--ckpt_path", - type=str, - default=None, - help="pretrained checkpoint path, helpful for using a pre-trained coarse-only LoFTR", - ) - parser.add_argument( - "--disable_ckpt", - action="store_true", - help="disable checkpoint saving (useful for debugging).", - ) - parser.add_argument( - "--profiler_name", - type=str, - default=None, - help="options: [inference, pytorch], or leave it unset", - ) - parser.add_argument( - "--parallel_load_data", - action="store_true", - help="load datasets in with multiple processes.", - ) - - parser = pl.Trainer.add_argparse_args(parser) - return parser.parse_args() - - -def main(): - # parse arguments - args = parse_args() - rank_zero_only(pprint.pprint)(vars(args)) - - # init default-cfg and merge it with the main- and data-cfg - config = get_cfg_defaults() - config.merge_from_file(args.main_cfg_path) - config.merge_from_file(args.data_cfg_path) - pl.seed_everything(config.TRAINER.SEED) # reproducibility - # TODO: Use different seeds for each dataloader workers - # This is needed for data augmentation - - # scale lr and warmup-step automatically - args.gpus = _n_gpus = setup_gpus(args.gpus) - config.TRAINER.WORLD_SIZE = _n_gpus * args.num_nodes - config.TRAINER.TRUE_BATCH_SIZE = config.TRAINER.WORLD_SIZE * args.batch_size - _scaling = config.TRAINER.TRUE_BATCH_SIZE / config.TRAINER.CANONICAL_BS - config.TRAINER.SCALING = _scaling - config.TRAINER.TRUE_LR = config.TRAINER.CANONICAL_LR * _scaling - config.TRAINER.WARMUP_STEP = math.floor(config.TRAINER.WARMUP_STEP / _scaling) - - # lightning module - profiler = build_profiler(args.profiler_name) - model = PL_Trainer(config, pretrained_ckpt=args.ckpt_path, profiler=profiler) - loguru_logger.info(f"Model LightningModule initialized!") - - # lightning data - data_module = MultiSceneDataModule(args, config) - loguru_logger.info(f"Model DataModule initialized!") - - # TensorBoard Logger - logger = TensorBoardLogger( - save_dir="logs/tb_logs", name=args.exp_name, default_hp_metric=False - ) - ckpt_dir = Path(logger.log_dir) / "checkpoints" - - # Callbacks - # TODO: update ModelCheckpoint to monitor multiple metrics - ckpt_callback = ModelCheckpoint( - monitor="auc@10", - verbose=True, - save_top_k=5, - mode="max", - save_last=True, - dirpath=str(ckpt_dir), - filename="{epoch}-{auc@5:.3f}-{auc@10:.3f}-{auc@20:.3f}", - ) - lr_monitor = LearningRateMonitor(logging_interval="step") - callbacks = [lr_monitor] - if not args.disable_ckpt: - callbacks.append(ckpt_callback) - - # Lightning Trainer - trainer = pl.Trainer.from_argparse_args( - args, - plugins=DDPPlugin( - 
find_unused_parameters=False, - num_nodes=args.num_nodes, - sync_batchnorm=config.TRAINER.WORLD_SIZE > 0, - ), - gradient_clip_val=config.TRAINER.GRADIENT_CLIPPING, - callbacks=callbacks, - logger=logger, - sync_batchnorm=config.TRAINER.WORLD_SIZE > 0, - replace_sampler_ddp=False, # use custom sampler - reload_dataloaders_every_epoch=False, # avoid repeated samples! - weights_summary="full", - profiler=profiler, - ) - loguru_logger.info(f"Trainer initialized!") - loguru_logger.info(f"Start training!") - trainer.fit(model, datamodule=data_module) - - -if __name__ == "__main__": - main() diff --git a/spaces/Reha2704/VToonify/vtoonify/model/stylegan/lpips/networks_basic.py b/spaces/Reha2704/VToonify/vtoonify/model/stylegan/lpips/networks_basic.py deleted file mode 100644 index 201359c4e743aed285694668e13da6dd5a40b621..0000000000000000000000000000000000000000 --- a/spaces/Reha2704/VToonify/vtoonify/model/stylegan/lpips/networks_basic.py +++ /dev/null @@ -1,187 +0,0 @@ - -from __future__ import absolute_import - -import sys -import torch -import torch.nn as nn -import torch.nn.init as init -from torch.autograd import Variable -import numpy as np -from pdb import set_trace as st -from skimage import color -from IPython import embed -from model.stylegan.lpips import pretrained_networks as pn - -import model.stylegan.lpips as util - -def spatial_average(in_tens, keepdim=True): - return in_tens.mean([2,3],keepdim=keepdim) - -def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W - in_H = in_tens.shape[2] - scale_factor = 1.*out_H/in_H - - return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) - -# Learned perceptual metric -class PNetLin(nn.Module): - def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, version='0.1', lpips=True): - super(PNetLin, self).__init__() - - self.pnet_type = pnet_type - self.pnet_tune = pnet_tune - self.pnet_rand = pnet_rand - self.spatial = spatial - self.lpips = lpips - self.version = version - self.scaling_layer = ScalingLayer() - - if(self.pnet_type in ['vgg','vgg16']): - net_type = pn.vgg16 - self.chns = [64,128,256,512,512] - elif(self.pnet_type=='alex'): - net_type = pn.alexnet - self.chns = [64,192,384,256,256] - elif(self.pnet_type=='squeeze'): - net_type = pn.squeezenet - self.chns = [64,128,256,384,384,512,512] - self.L = len(self.chns) - - self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune) - - if(lpips): - self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) - self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) - self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) - self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) - self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) - self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4] - if(self.pnet_type=='squeeze'): # 7 layers for squeezenet - self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout) - self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout) - self.lins+=[self.lin5,self.lin6] - - def forward(self, in0, in1, retPerLayer=False): - # v0.0 - original release had a bug, where input was not scaled - in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1) - outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input) - feats0, feats1, diffs = {}, {}, {} - - for kk in range(self.L): - feats0[kk], feats1[kk] = 
util.normalize_tensor(outs0[kk]), util.normalize_tensor(outs1[kk]) - diffs[kk] = (feats0[kk]-feats1[kk])**2 - - if(self.lpips): - if(self.spatial): - res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)] - else: - if(self.spatial): - res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)] - - val = res[0] - for l in range(1,self.L): - val += res[l] - - if(retPerLayer): - return (val, res) - else: - return val - -class ScalingLayer(nn.Module): - def __init__(self): - super(ScalingLayer, self).__init__() - self.register_buffer('shift', torch.Tensor([-.030,-.088,-.188])[None,:,None,None]) - self.register_buffer('scale', torch.Tensor([.458,.448,.450])[None,:,None,None]) - - def forward(self, inp): - return (inp - self.shift) / self.scale - - -class NetLinLayer(nn.Module): - ''' A single linear layer which does a 1x1 conv ''' - def __init__(self, chn_in, chn_out=1, use_dropout=False): - super(NetLinLayer, self).__init__() - - layers = [nn.Dropout(),] if(use_dropout) else [] - layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),] - self.model = nn.Sequential(*layers) - - -class Dist2LogitLayer(nn.Module): - ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) ''' - def __init__(self, chn_mid=32, use_sigmoid=True): - super(Dist2LogitLayer, self).__init__() - - layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),] - layers += [nn.LeakyReLU(0.2,True),] - layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),] - layers += [nn.LeakyReLU(0.2,True),] - layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),] - if(use_sigmoid): - layers += [nn.Sigmoid(),] - self.model = nn.Sequential(*layers) - - def forward(self,d0,d1,eps=0.1): - return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1)) - -class BCERankingLoss(nn.Module): - def __init__(self, chn_mid=32): - super(BCERankingLoss, self).__init__() - self.net = Dist2LogitLayer(chn_mid=chn_mid) - # self.parameters = list(self.net.parameters()) - self.loss = torch.nn.BCELoss() - - def forward(self, d0, d1, judge): - per = (judge+1.)/2. 
- self.logit = self.net.forward(d0,d1) - return self.loss(self.logit, per) - -# L2, DSSIM metrics -class FakeNet(nn.Module): - def __init__(self, use_gpu=True, colorspace='Lab'): - super(FakeNet, self).__init__() - self.use_gpu = use_gpu - self.colorspace=colorspace - -class L2(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert(in0.size()[0]==1) # currently only supports batchSize 1 - - if(self.colorspace=='RGB'): - (N,C,X,Y) = in0.size() - value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N) - return value - elif(self.colorspace=='Lab'): - value = util.l2(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)), - util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float') - ret_var = Variable( torch.Tensor((value,) ) ) - if(self.use_gpu): - ret_var = ret_var.cuda() - return ret_var - -class DSSIM(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert(in0.size()[0]==1) # currently only supports batchSize 1 - - if(self.colorspace=='RGB'): - value = util.dssim(1.*util.tensor2im(in0.data), 1.*util.tensor2im(in1.data), range=255.).astype('float') - elif(self.colorspace=='Lab'): - value = util.dssim(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)), - util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float') - ret_var = Variable( torch.Tensor((value,) ) ) - if(self.use_gpu): - ret_var = ret_var.cuda() - return ret_var - -def print_network(net): - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - print('Network',net) - print('Total number of parameters: %d' % num_params) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/bricks/upsample.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/bricks/upsample.py deleted file mode 100644 index a1a353767d0ce8518f0d7289bed10dba0178ed12..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/bricks/upsample.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F - -from ..utils import xavier_init -from .registry import UPSAMPLE_LAYERS - -UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample) -UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample) - - -@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle') -class PixelShufflePack(nn.Module): - """Pixel Shuffle upsample layer. - - This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to - achieve a simple upsampling with pixel shuffle. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - scale_factor (int): Upsample ratio. - upsample_kernel (int): Kernel size of the conv layer to expand the - channels. 
- """ - - def __init__(self, in_channels, out_channels, scale_factor, - upsample_kernel): - super(PixelShufflePack, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.scale_factor = scale_factor - self.upsample_kernel = upsample_kernel - self.upsample_conv = nn.Conv2d( - self.in_channels, - self.out_channels * scale_factor * scale_factor, - self.upsample_kernel, - padding=(self.upsample_kernel - 1) // 2) - self.init_weights() - - def init_weights(self): - xavier_init(self.upsample_conv, distribution='uniform') - - def forward(self, x): - x = self.upsample_conv(x) - x = F.pixel_shuffle(x, self.scale_factor) - return x - - -def build_upsample_layer(cfg, *args, **kwargs): - """Build upsample layer. - - Args: - cfg (dict): The upsample layer config, which should contain: - - - type (str): Layer type. - - scale_factor (int): Upsample ratio, which is not applicable to - deconv. - - layer args: Args needed to instantiate a upsample layer. - args (argument list): Arguments passed to the ``__init__`` - method of the corresponding conv layer. - kwargs (keyword arguments): Keyword arguments passed to the - ``__init__`` method of the corresponding conv layer. - - Returns: - nn.Module: Created upsample layer. - """ - if not isinstance(cfg, dict): - raise TypeError(f'cfg must be a dict, but got {type(cfg)}') - if 'type' not in cfg: - raise KeyError( - f'the cfg dict must contain the key "type", but got {cfg}') - cfg_ = cfg.copy() - - layer_type = cfg_.pop('type') - if layer_type not in UPSAMPLE_LAYERS: - raise KeyError(f'Unrecognized upsample type {layer_type}') - else: - upsample = UPSAMPLE_LAYERS.get(layer_type) - - if upsample is nn.Upsample: - cfg_['mode'] = layer_type - layer = upsample(*args, **kwargs, **cfg_) - return layer diff --git a/spaces/Rongjiehuang/ProDiff/modules/FastDiff/module/WaveNet.py b/spaces/Rongjiehuang/ProDiff/modules/FastDiff/module/WaveNet.py deleted file mode 100644 index 15f5fdc75ff696646c86551642deaebf2dd89ead..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/ProDiff/modules/FastDiff/module/WaveNet.py +++ /dev/null @@ -1,189 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F -from modules.FastDiff.module.util import calc_noise_scale_embedding -def swish(x): - return x * torch.sigmoid(x) - - -# dilated conv layer with kaiming_normal initialization -# from https://github.com/ksw0306/FloWaveNet/blob/master/modules.py -class Conv(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1): - super(Conv, self).__init__() - self.padding = dilation * (kernel_size - 1) // 2 - self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, dilation=dilation, padding=self.padding) - self.conv = nn.utils.weight_norm(self.conv) - nn.init.kaiming_normal_(self.conv.weight) - - def forward(self, x): - out = self.conv(x) - return out - - -# conv1x1 layer with zero initialization -# from https://github.com/ksw0306/FloWaveNet/blob/master/modules.py but the scale parameter is removed -class ZeroConv1d(nn.Module): - def __init__(self, in_channel, out_channel): - super(ZeroConv1d, self).__init__() - self.conv = nn.Conv1d(in_channel, out_channel, kernel_size=1, padding=0) - self.conv.weight.data.zero_() - self.conv.bias.data.zero_() - - def forward(self, x): - out = self.conv(x) - return out - - -# every residual block (named residual layer in paper) -# contains one noncausal dilated conv -class Residual_block(nn.Module): - def __init__(self, res_channels, skip_channels, 
dilation, - noise_scale_embed_dim_out, multiband=True): - super(Residual_block, self).__init__() - self.res_channels = res_channels - - # the layer-specific fc for noise scale embedding - self.fc_t = nn.Linear(noise_scale_embed_dim_out, self.res_channels) - - # dilated conv layer - self.dilated_conv_layer = Conv(self.res_channels, 2 * self.res_channels, kernel_size=3, dilation=dilation) - - # add mel spectrogram upsampler and conditioner conv1x1 layer - self.upsample_conv2d = torch.nn.ModuleList() - if multiband is True: - params = 8 - else: - params = 16 - for s in [params, params]: ####### Very Important!!!!! ####### - conv_trans2d = torch.nn.ConvTranspose2d(1, 1, (3, 2 * s), padding=(1, s // 2), stride=(1, s)) - conv_trans2d = torch.nn.utils.weight_norm(conv_trans2d) - torch.nn.init.kaiming_normal_(conv_trans2d.weight) - self.upsample_conv2d.append(conv_trans2d) - self.mel_conv = Conv(80, 2 * self.res_channels, kernel_size=1) # 80 is mel bands - - # residual conv1x1 layer, connect to next residual layer - self.res_conv = nn.Conv1d(res_channels, res_channels, kernel_size=1) - self.res_conv = nn.utils.weight_norm(self.res_conv) - nn.init.kaiming_normal_(self.res_conv.weight) - - # skip conv1x1 layer, add to all skip outputs through skip connections - self.skip_conv = nn.Conv1d(res_channels, skip_channels, kernel_size=1) - self.skip_conv = nn.utils.weight_norm(self.skip_conv) - nn.init.kaiming_normal_(self.skip_conv.weight) - - def forward(self, input_data): - x, mel_spec, noise_scale_embed = input_data - h = x - B, C, L = x.shape # B, res_channels, L - assert C == self.res_channels - - # add in noise scale embedding - part_t = self.fc_t(noise_scale_embed) - part_t = part_t.view([B, self.res_channels, 1]) - h += part_t - - # dilated conv layer - h = self.dilated_conv_layer(h) - - # add mel spectrogram as (local) conditioner - assert mel_spec is not None - - # Upsample spectrogram to size of audio - mel_spec = torch.unsqueeze(mel_spec, dim=1) # (B, 1, 80, T') - mel_spec = F.leaky_relu(self.upsample_conv2d[0](mel_spec), 0.4) - mel_spec = F.leaky_relu(self.upsample_conv2d[1](mel_spec), 0.4) - mel_spec = torch.squeeze(mel_spec, dim=1) - - assert(mel_spec.size(2) >= L) - if mel_spec.size(2) > L: - mel_spec = mel_spec[:, :, :L] - - mel_spec = self.mel_conv(mel_spec) - h += mel_spec - - # gated-tanh nonlinearity - out = torch.tanh(h[:,:self.res_channels,:]) * torch.sigmoid(h[:,self.res_channels:,:]) - - # residual and skip outputs - res = self.res_conv(out) - assert x.shape == res.shape - skip = self.skip_conv(out) - - return (x + res) * math.sqrt(0.5), skip # normalize for training stability - - -class Residual_group(nn.Module): - def __init__(self, res_channels, skip_channels, num_res_layers, dilation_cycle, - noise_scale_embed_dim_in, - noise_scale_embed_dim_mid, - noise_scale_embed_dim_out, multiband): - super(Residual_group, self).__init__() - self.num_res_layers = num_res_layers - self.noise_scale_embed_dim_in = noise_scale_embed_dim_in - - # the shared two fc layers for noise scale embedding - self.fc_t1 = nn.Linear(noise_scale_embed_dim_in, noise_scale_embed_dim_mid) - self.fc_t2 = nn.Linear(noise_scale_embed_dim_mid, noise_scale_embed_dim_out) - - # stack all residual blocks with dilations 1, 2, ... , 512, ... 
, 1, 2, ..., 512 - self.residual_blocks = nn.ModuleList() - for n in range(self.num_res_layers): - self.residual_blocks.append(Residual_block(res_channels, skip_channels, - dilation=2 ** (n % dilation_cycle), - noise_scale_embed_dim_out=noise_scale_embed_dim_out, multiband=multiband)) - - def forward(self, input_data): - x, mel_spectrogram, noise_scales = input_data - - # embed noise scale - noise_scale_embed = calc_noise_scale_embedding(noise_scales, self.noise_scale_embed_dim_in) - noise_scale_embed = swish(self.fc_t1(noise_scale_embed)) - noise_scale_embed = swish(self.fc_t2(noise_scale_embed)) - - # pass all residual layers - h = x - skip = 0 - for n in range(self.num_res_layers): - h, skip_n = self.residual_blocks[n]((h, mel_spectrogram, noise_scale_embed)) # use the output from last residual layer - skip += skip_n # accumulate all skip outputs - - return skip * math.sqrt(1.0 / self.num_res_layers) # normalize for training stability - - -class WaveNet_vocoder(nn.Module): - def __init__(self, in_channels, res_channels, skip_channels, out_channels, - num_res_layers, dilation_cycle, - noise_scale_embed_dim_in, - noise_scale_embed_dim_mid, - noise_scale_embed_dim_out, multiband): - super(WaveNet_vocoder, self).__init__() - - # initial conv1x1 with relu - self.init_conv = nn.Sequential(Conv(in_channels, res_channels, kernel_size=1), nn.ReLU()) - - # all residual layers - self.residual_layer = Residual_group(res_channels=res_channels, - skip_channels=skip_channels, - num_res_layers=num_res_layers, - dilation_cycle=dilation_cycle, - noise_scale_embed_dim_in=noise_scale_embed_dim_in, - noise_scale_embed_dim_mid=noise_scale_embed_dim_mid, - noise_scale_embed_dim_out=noise_scale_embed_dim_out, multiband=multiband) - - # final conv1x1 -> relu -> zeroconv1x1 - self.final_conv = nn.Sequential(Conv(skip_channels, skip_channels, kernel_size=1), - nn.ReLU(), - ZeroConv1d(skip_channels, out_channels)) - - def forward(self, input_data): - audio, mel_spectrogram, noise_scales = input_data # b x band x T, b x 80 x T', b x 1 - x = audio - x = self.init_conv(x) - x = self.residual_layer((x, mel_spectrogram, noise_scales)) - x = self.final_conv(x) - - return x - diff --git a/spaces/SAUL19/imagen-audio/app.py b/spaces/SAUL19/imagen-audio/app.py deleted file mode 100644 index 0eca146a9424c1c07680f157eeea61d7233f6686..0000000000000000000000000000000000000000 --- a/spaces/SAUL19/imagen-audio/app.py +++ /dev/null @@ -1,59 +0,0 @@ -import gradio as gr -from gradio.inputs import Textbox - -import uuid -import torch -from diffusers import StableDiffusionPipeline -import boto3 -from io import BytesIO -import os - -AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID") -AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY") -S3_BUCKET_NAME = os.getenv("BUCKET_NAME") - -model_id = "CompVis/stable-diffusion-v1-4" -device = "cuda" if torch.cuda.is_available() else "cpu" - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, torch_dtype=torch.float32) - -pipe = pipe.to(device) - -def text_to_image(prompt): - - # Create an instance of the S3 client - s3 = boto3.client('s3', - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) - - # Obtener un nombre único utilizando UUID - unique_name = str(uuid.uuid4()) - image_name = '-'.join(unique_name.split()) + ".webp" - - def save_image_to_s3(image): - # Create a BytesIO object to store the image. 
- image_buffer = BytesIO() - image.save(image_buffer, format='WEBP') - image_buffer.seek(0) - - # Full path of the file in the bucket - s3_key = "public/images/" + image_name - - # Upload the image to the S3 bucket - s3.upload_fileobj(image_buffer, S3_BUCKET_NAME, s3_key) - - def generator_image(prompt): - prompt = prompt - image = pipe(prompt).images[0] - - # Save the image in S3 - save_image_to_s3(image) - - generator_image(prompt) - return image_name - - - -iface = gr.Interface(fn=text_to_image, inputs=[Textbox(label="prompt")], outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/SERER/VITS-Umamusume-voice-synthesizer/data_utils.py b/spaces/SERER/VITS-Umamusume-voice-synthesizer/data_utils.py deleted file mode 100644 index e9246c6c8f2ff3c37a7f8529ea1593c7f80f887e..0000000000000000000000000000000000000000 --- a/spaces/SERER/VITS-Umamusume-voice-synthesizer/data_utils.py +++ /dev/null @@ -1,393 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import commons -from mel_processing import spectrogram_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import text_to_sequence, cleaned_text_to_sequence - - -class TextAudioLoader(torch.utils.data.Dataset): - """ - 1) loads audio, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - def __init__(self, audiopaths_and_text, hparams): - self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - - random.seed(1234) - random.shuffle(self.audiopaths_and_text) - self._filter() - - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_and_text_new = [] - lengths = [] - for audiopath, text in self.audiopaths_and_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_and_text_new.append([audiopath, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_and_text = audiopaths_and_text_new - self.lengths = lengths - - def get_audio_text_pair(self, audiopath_and_text): - # separate filename and text - audiopath, text = audiopath_and_text[0], audiopath_and_text[1] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - return (text, spec, wav) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, 
self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text) - else: - text_norm = text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def __getitem__(self, index): - return self.get_audio_text_pair(self.audiopaths_and_text[index]) - - def __len__(self): - return len(self.audiopaths_and_text) - - -class TextAudioCollate(): - """ Zero-pads model inputs and targets - """ - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text and aduio - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths - - -"""Multi speaker version""" -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - def __init__(self, audiopaths_sid_text, hparams): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - for audiopath, sid, text in self.audiopaths_sid_text: - audiopath = "E:/uma_voice/" + audiopath - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_sid_text_new.append([audiopath, sid, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - sid = self.get_sid(sid) - return (text, spec, wav, sid) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text) - else: - text_norm = text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate(): - """ Zero-pads model inputs and targets - """ - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = 
max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. - """ - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i+1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j 
in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/SIGGRAPH2022/Text2Human/Text2Human/models/losses/__init__.py b/spaces/SIGGRAPH2022/Text2Human/Text2Human/models/losses/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/SUSSYMANBI/nerijs-pixel-art-xl-sdxl/app.py b/spaces/SUSSYMANBI/nerijs-pixel-art-xl-sdxl/app.py deleted file mode 100644 index c85ba4eb2a82562cd9234b5447dbb4c691492133..0000000000000000000000000000000000000000 --- a/spaces/SUSSYMANBI/nerijs-pixel-art-xl-sdxl/app.py +++ /dev/null @@ -1,4 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/nerijs/pixel-art-xl").launch() -title = ('Pixelart diffusion') \ No newline at end of file diff --git a/spaces/Salesforce/BLIP/utils.py b/spaces/Salesforce/BLIP/utils.py deleted file mode 100644 index ebe0e1dc2f5d200156d5dd1acc305a8b7b7b98da..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/BLIP/utils.py +++ /dev/null @@ -1,278 +0,0 @@ -import math -def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr): - """Decay the learning rate""" - lr = (init_lr - min_lr) * 0.5 * (1. + math.cos(math.pi * epoch / max_epoch)) + min_lr - for param_group in optimizer.param_groups: - param_group['lr'] = lr - -def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr): - """Warmup the learning rate""" - lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max_step) - for param_group in optimizer.param_groups: - param_group['lr'] = lr - -def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate): - """Decay the learning rate""" - lr = max(min_lr, init_lr * (decay_rate**epoch)) - for param_group in optimizer.param_groups: - param_group['lr'] = lr - -import numpy as np -import io -import os -import time -from collections import defaultdict, deque -import datetime - -import torch -import torch.distributed as dist - -class SmoothedValue(object): - """Track a series of values and provide access to smoothed values over a - window or the global series average. - """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! 
- """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - return self.total / self.count - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, - avg=self.avg, - global_avg=self.global_avg, - max=self.max, - value=self.value) - - -class MetricLogger(object): - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append( - "{}: {}".format(name, str(meter)) - ) - return self.delimiter.join(loss_str) - - def global_avg(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append( - "{}: {:.4f}".format(name, meter.global_avg) - ) - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None): - i = 0 - if not header: - header = '' - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt='{avg:.4f}') - data_time = SmoothedValue(fmt='{avg:.4f}') - space_fmt = ':' + str(len(str(len(iterable)))) + 'd' - log_msg = [ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}' - ] - if torch.cuda.is_available(): - log_msg.append('max mem: {memory:.0f}') - log_msg = self.delimiter.join(log_msg) - MB = 1024.0 * 1024.0 - for obj in iterable: - data_time.update(time.time() - end) - yield obj - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB)) - else: - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time))) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('{} Total time: {} ({:.4f} s / it)'.format( - header, total_time_str, total_time / len(iterable))) - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def compute_acc(logits, label, reduction='mean'): - ret = (torch.argmax(logits, 
dim=1) == label).float() - if reduction == 'none': - return ret.detach() - elif reduction == 'mean': - return ret.mean().item() - -def compute_n_params(model, return_str=True): - tot = 0 - for p in model.parameters(): - w = 1 - for x in p.shape: - w *= x - tot += w - if return_str: - if tot >= 1e6: - return '{:.1f}M'.format(tot / 1e6) - else: - return '{:.1f}K'.format(tot / 1e3) - else: - return tot - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop('force', False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ['WORLD_SIZE']) - args.gpu = int(os.environ['LOCAL_RANK']) - elif 'SLURM_PROCID' in os.environ: - args.rank = int(os.environ['SLURM_PROCID']) - args.gpu = args.rank % torch.cuda.device_count() - else: - print('Not using distributed mode') - args.distributed = False - return - - args.distributed = True - - torch.cuda.set_device(args.gpu) - args.dist_backend = 'nccl' - print('| distributed init (rank {}, word {}): {}'.format( - args.rank, args.world_size, args.dist_url), flush=True) - torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, - world_size=args.world_size, rank=args.rank) - torch.distributed.barrier() - setup_for_distributed(args.rank == 0) - - \ No newline at end of file diff --git a/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_utils.py b/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_utils.py deleted file mode 100644 index f2bcd73acf32c1e152a5d8708479731996731c6d..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_utils.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from dataclasses import dataclass -from typing import Union - -import numpy as np -import torch - -from ..utils import BaseOutput - - -SCHEDULER_CONFIG_NAME = "scheduler_config.json" - - -@dataclass -class SchedulerOutput(BaseOutput): - """ - Base class for the scheduler's step function output. 
- - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - """ - - prev_sample: torch.FloatTensor - - -class SchedulerMixin: - """ - Mixin containing common functions for the schedulers. - """ - - config_name = SCHEDULER_CONFIG_NAME - ignore_for_config = ["tensor_format"] - - def set_format(self, tensor_format="pt"): - self.tensor_format = tensor_format - if tensor_format == "pt": - for key, value in vars(self).items(): - if isinstance(value, np.ndarray): - setattr(self, key, torch.from_numpy(value)) - - return self - - def clip(self, tensor, min_value=None, max_value=None): - tensor_format = getattr(self, "tensor_format", "pt") - - if tensor_format == "np": - return np.clip(tensor, min_value, max_value) - elif tensor_format == "pt": - return torch.clamp(tensor, min_value, max_value) - - raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") - - def log(self, tensor): - tensor_format = getattr(self, "tensor_format", "pt") - - if tensor_format == "np": - return np.log(tensor) - elif tensor_format == "pt": - return torch.log(tensor) - - raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") - - def match_shape(self, values: Union[np.ndarray, torch.Tensor], broadcast_array: Union[np.ndarray, torch.Tensor]): - """ - Turns a 1-D array into an array or tensor with len(broadcast_array.shape) dims. - - Args: - values: an array or tensor of values to extract. - broadcast_array: an array with a larger shape of K dimensions with the batch - dimension equal to the length of timesteps. - Returns: - a tensor of shape [batch_size, 1, ...] where the shape has K dims. 
- """ - - tensor_format = getattr(self, "tensor_format", "pt") - values = values.flatten() - - while len(values.shape) < len(broadcast_array.shape): - values = values[..., None] - if tensor_format == "pt": - values = values.to(broadcast_array.device) - - return values - - def norm(self, tensor): - tensor_format = getattr(self, "tensor_format", "pt") - if tensor_format == "np": - return np.linalg.norm(tensor) - elif tensor_format == "pt": - return torch.norm(tensor.reshape(tensor.shape[0], -1), dim=-1).mean() - - raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") - - def randn_like(self, tensor, generator=None): - tensor_format = getattr(self, "tensor_format", "pt") - if tensor_format == "np": - return np.random.randn(*np.shape(tensor)) - elif tensor_format == "pt": - # return torch.randn_like(tensor) - return torch.randn(tensor.shape, layout=tensor.layout, generator=generator).to(tensor.device) - - raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") - - def zeros_like(self, tensor): - tensor_format = getattr(self, "tensor_format", "pt") - if tensor_format == "np": - return np.zeros_like(tensor) - elif tensor_format == "pt": - return torch.zeros_like(tensor) - - raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") diff --git a/spaces/Sandiago21/speech-to-speech-translation-spanish/README.md b/spaces/Sandiago21/speech-to-speech-translation-spanish/README.md deleted file mode 100644 index f663ff4ac1894064d2a35d0b0c244c90cf146dc1..0000000000000000000000000000000000000000 --- a/spaces/Sandiago21/speech-to-speech-translation-spanish/README.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: speech-to-speech-translation-spanish -app_file: app.py -sdk: gradio -sdk_version: 3.36.0 ---- diff --git a/spaces/Sapphire-356/Video2MC/joints_detectors/openpose/README.md b/spaces/Sapphire-356/Video2MC/joints_detectors/openpose/README.md deleted file mode 100644 index 648314d88913e688c3542c4f1521234e7a626586..0000000000000000000000000000000000000000 --- a/spaces/Sapphire-356/Video2MC/joints_detectors/openpose/README.md +++ /dev/null @@ -1,4 +0,0 @@ -需要将该环境conda的python与openpose编译,才能调用openpose python API - - - diff --git a/spaces/Silentlin/DiffSinger/utils/__init__.py b/spaces/Silentlin/DiffSinger/utils/__init__.py deleted file mode 100644 index 4ea5c5a67e038c2213247dfb905942882c090a77..0000000000000000000000000000000000000000 --- a/spaces/Silentlin/DiffSinger/utils/__init__.py +++ /dev/null @@ -1,250 +0,0 @@ -import glob -import logging -import re -import time -from collections import defaultdict -import os -import sys -import shutil -import types -import numpy as np -import torch -import torch.nn.functional as F -import torch.distributed as dist -from torch import nn - - -def tensors_to_scalars(metrics): - new_metrics = {} - for k, v in metrics.items(): - if isinstance(v, torch.Tensor): - v = v.item() - if type(v) is dict: - v = tensors_to_scalars(v) - new_metrics[k] = v - return new_metrics - - -class AvgrageMeter(object): - - def __init__(self): - self.reset() - - def reset(self): - self.avg = 0 - self.sum = 0 - self.cnt = 0 - - def update(self, val, n=1): - self.sum += val * n - self.cnt += n - self.avg = self.sum / self.cnt - - -def collate_1d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1): - """Convert a list of 1d tensors into a padded 2d tensor.""" - size = max(v.size(0) for v in values) if max_len is None else max_len - res = values[0].new(len(values), size).fill_(pad_idx) - - def copy_tensor(src, 
dst): - assert dst.numel() == src.numel() - if shift_right: - dst[1:] = src[:-1] - dst[0] = shift_id - else: - dst.copy_(src) - - for i, v in enumerate(values): - copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) - return res - - -def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None): - """Convert a list of 2d tensors into a padded 3d tensor.""" - size = max(v.size(0) for v in values) if max_len is None else max_len - res = values[0].new(len(values), size, values[0].shape[1]).fill_(pad_idx) - - def copy_tensor(src, dst): - assert dst.numel() == src.numel() - if shift_right: - dst[1:] = src[:-1] - else: - dst.copy_(src) - - for i, v in enumerate(values): - copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) - return res - - -def _is_batch_full(batch, num_tokens, max_tokens, max_sentences): - if len(batch) == 0: - return 0 - if len(batch) == max_sentences: - return 1 - if num_tokens > max_tokens: - return 1 - return 0 - - -def batch_by_size( - indices, num_tokens_fn, max_tokens=None, max_sentences=None, - required_batch_size_multiple=1, distributed=False -): - """ - Yield mini-batches of indices bucketed by size. Batches may contain - sequences of different lengths. - - Args: - indices (List[int]): ordered list of dataset indices - num_tokens_fn (callable): function that returns the number of tokens at - a given index - max_tokens (int, optional): max number of tokens in each batch - (default: None). - max_sentences (int, optional): max number of sentences in each - batch (default: None). - required_batch_size_multiple (int, optional): require batch size to - be a multiple of N (default: 1). - """ - max_tokens = max_tokens if max_tokens is not None else sys.maxsize - max_sentences = max_sentences if max_sentences is not None else sys.maxsize - bsz_mult = required_batch_size_multiple - - if isinstance(indices, types.GeneratorType): - indices = np.fromiter(indices, dtype=np.int64, count=-1) - - sample_len = 0 - sample_lens = [] - batch = [] - batches = [] - for i in range(len(indices)): - idx = indices[i] - num_tokens = num_tokens_fn(idx) - sample_lens.append(num_tokens) - sample_len = max(sample_len, num_tokens) - assert sample_len <= max_tokens, ( - "sentence at index {} of size {} exceeds max_tokens " - "limit of {}!".format(idx, sample_len, max_tokens) - ) - num_tokens = (len(batch) + 1) * sample_len - - if _is_batch_full(batch, num_tokens, max_tokens, max_sentences): - mod_len = max( - bsz_mult * (len(batch) // bsz_mult), - len(batch) % bsz_mult, - ) - batches.append(batch[:mod_len]) - batch = batch[mod_len:] - sample_lens = sample_lens[mod_len:] - sample_len = max(sample_lens) if len(sample_lens) > 0 else 0 - batch.append(idx) - if len(batch) > 0: - batches.append(batch) - return batches - - -def make_positions(tensor, padding_idx): - """Replace non-padding symbols with their position numbers. - - Position numbers begin at padding_idx+1. Padding symbols are ignored. - """ - # The series of casts and type-conversions here are carefully - # balanced to both work with ONNX export and XLA. In particular XLA - # prefers ints, cumsum defaults to output longs, and ONNX doesn't know - # how to handle the dtype kwarg in cumsum. 
- mask = tensor.ne(padding_idx).int() - return ( - torch.cumsum(mask, dim=1).type_as(mask) * mask - ).long() + padding_idx - - -def softmax(x, dim): - return F.softmax(x, dim=dim, dtype=torch.float32) - - -def unpack_dict_to_list(samples): - samples_ = [] - bsz = samples.get('outputs').size(0) - for i in range(bsz): - res = {} - for k, v in samples.items(): - try: - res[k] = v[i] - except: - pass - samples_.append(res) - return samples_ - - -def load_ckpt(cur_model, ckpt_base_dir, prefix_in_ckpt='model', force=True, strict=True): - if os.path.isfile(ckpt_base_dir): - base_dir = os.path.dirname(ckpt_base_dir) - checkpoint_path = [ckpt_base_dir] - else: - base_dir = ckpt_base_dir - checkpoint_path = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key= - lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0])) - if len(checkpoint_path) > 0: - checkpoint_path = checkpoint_path[-1] - state_dict = torch.load(checkpoint_path, map_location="cpu")["state_dict"] - state_dict = {k[len(prefix_in_ckpt) + 1:]: v for k, v in state_dict.items() - if k.startswith(f'{prefix_in_ckpt}.')} - if not strict: - cur_model_state_dict = cur_model.state_dict() - unmatched_keys = [] - for key, param in state_dict.items(): - if key in cur_model_state_dict: - new_param = cur_model_state_dict[key] - if new_param.shape != param.shape: - unmatched_keys.append(key) - print("| Unmatched keys: ", key, new_param.shape, param.shape) - for key in unmatched_keys: - del state_dict[key] - cur_model.load_state_dict(state_dict, strict=strict) - print(f"| load '{prefix_in_ckpt}' from '{checkpoint_path}'.") - else: - e_msg = f"| ckpt not found in {base_dir}." - if force: - assert False, e_msg - else: - print(e_msg) - - -def remove_padding(x, padding_idx=0): - if x is None: - return None - assert len(x.shape) in [1, 2] - if len(x.shape) == 2: # [T, H] - return x[np.abs(x).sum(-1) != padding_idx] - elif len(x.shape) == 1: # [T] - return x[x != padding_idx] - - -class Timer: - timer_map = {} - - def __init__(self, name, print_time=False): - if name not in Timer.timer_map: - Timer.timer_map[name] = 0 - self.name = name - self.print_time = print_time - - def __enter__(self): - self.t = time.time() - - def __exit__(self, exc_type, exc_val, exc_tb): - Timer.timer_map[self.name] += time.time() - self.t - if self.print_time: - print(self.name, Timer.timer_map[self.name]) - - -def print_arch(model, model_name='model'): - print(f"| {model_name} Arch: ", model) - num_params(model, model_name=model_name) - - -def num_params(model, print_out=True, model_name="model"): - parameters = filter(lambda p: p.requires_grad, model.parameters()) - parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 - if print_out: - print(f'| {model_name} Trainable Parameters: %.3fM' % parameters) - return parameters diff --git a/spaces/Souranil/VAE/train.py b/spaces/Souranil/VAE/train.py deleted file mode 100644 index 24789a608aed076f9e78a6782900cbad2ec303b9..0000000000000000000000000000000000000000 --- a/spaces/Souranil/VAE/train.py +++ /dev/null @@ -1,37 +0,0 @@ -from pytorch_lightning import Trainer -from models import vae_models -from config import config -from pytorch_lightning.callbacks import LearningRateMonitor -from pytorch_lightning.loggers import TensorBoardLogger -import os -os.environ['KMP_DUPLICATE_LIB_OK']='True' - - -def make_model(config): - model_type = config.model_type - model_config = config.model_config - - if model_type not in vae_models.keys(): - raise NotImplementedError("Model Architecture not 
implemented") - else: - return vae_models[model_type](**model_config.dict()) - - -if __name__ == "__main__": - model = make_model(config) - train_config = config.train_config - logger = TensorBoardLogger(**config.log_config.dict()) - trainer = Trainer(**train_config.dict(), logger=logger, - callbacks=LearningRateMonitor()) - if train_config.auto_lr_find: - lr_finder = trainer.tuner.lr_find(model) - new_lr = lr_finder.suggestion() - print("Learning Rate Chosen:", new_lr) - model.lr = new_lr - trainer.fit(model) - else: - trainer.fit(model) - if not os.path.isdir("./saved_models"): - os.mkdir("./saved_models") - trainer.save_checkpoint( - f"saved_models/{config.model_type}_alpha_{config.model_config.alpha}_dim_{config.model_config.hidden_size}.ckpt") diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/models/audiogen.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/models/audiogen.py deleted file mode 100644 index 6adefb97401c10422c9711d222c0857f5593dceb..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/models/audiogen.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Main model for using AudioGen. This will combine all the required components -and provide easy access to the generation API. -""" - -import typing as tp - -import torch - -from .encodec import CompressionModel -from .lm import LMModel -from .builders import get_debug_compression_model, get_debug_lm_model -from .loaders import load_compression_model, load_lm_model -from ..data.audio_utils import convert_audio -from ..modules.conditioners import ConditioningAttributes -from ..utils.autocast import TorchAutocast - - -class AudioGen: - """AudioGen main model with convenient generation API. - - Args: - name (str): name of the model. - compression_model (CompressionModel): Compression model - used to map audio to invertible discrete representations. - lm (LMModel): Language model over discrete representations. - max_duration (float, optional): maximum duration the model can produce, - otherwise, inferred from the training params. 
- """ - def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel, - max_duration: tp.Optional[float] = None): - self.name = name - self.compression_model = compression_model - self.lm = lm - if max_duration is None: - if hasattr(lm, 'cfg'): - max_duration = lm.cfg.dataset.segment_duration # type: ignore - else: - raise ValueError("You must provide max_duration when building directly AudioGen") - assert max_duration is not None - self.max_duration: float = max_duration - self.device = next(iter(lm.parameters())).device - self.generation_params: dict = {} - self.set_generation_params(duration=5) # 5 seconds by default - self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None - if self.device.type == 'cpu': - self.autocast = TorchAutocast(enabled=False) - else: - self.autocast = TorchAutocast( - enabled=True, device_type=self.device.type, dtype=torch.float16) - - @property - def frame_rate(self) -> float: - """Roughly the number of AR steps per seconds.""" - return self.compression_model.frame_rate - - @property - def sample_rate(self) -> int: - """Sample rate of the generated audio.""" - return self.compression_model.sample_rate - - @property - def audio_channels(self) -> int: - """Audio channels of the generated audio.""" - return self.compression_model.channels - - @staticmethod - def get_pretrained(name: str = 'facebook/audiogen-medium', device=None): - """Return pretrained model, we provide a single model for now: - - facebook/audiogen-medium (1.5B), text to sound, - # see: https://huggingface.co/facebook/audiogen-medium - """ - if device is None: - if torch.cuda.device_count(): - device = 'cuda' - else: - device = 'cpu' - - if name == 'debug': - # used only for unit tests - compression_model = get_debug_compression_model(device, sample_rate=16000) - lm = get_debug_lm_model(device) - return AudioGen(name, compression_model, lm, max_duration=10) - - compression_model = load_compression_model(name, device=device) - lm = load_lm_model(name, device=device) - assert 'self_wav' not in lm.condition_provider.conditioners, \ - "AudioGen do not support waveform conditioning for now" - return AudioGen(name, compression_model, lm) - - def set_generation_params(self, use_sampling: bool = True, top_k: int = 250, - top_p: float = 0.0, temperature: float = 1.0, - duration: float = 10.0, cfg_coef: float = 3.0, - two_step_cfg: bool = False, extend_stride: float = 2): - """Set the generation parameters for AudioGen. - - Args: - use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True. - top_k (int, optional): top_k used for sampling. Defaults to 250. - top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0. - temperature (float, optional): Softmax temperature parameter. Defaults to 1.0. - duration (float, optional): Duration of the generated waveform. Defaults to 10.0. - cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0. - two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance, - instead of batching together the two. This has some impact on how things - are padded but seems to have little impact in practice. - extend_stride: when doing extended generation (i.e. more than 10 seconds), by how much - should we extend the audio each time. Larger values will mean less context is - preserved, and shorter value will require extra computations. 
- """ - assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration." - self.extend_stride = extend_stride - self.duration = duration - self.generation_params = { - 'use_sampling': use_sampling, - 'temp': temperature, - 'top_k': top_k, - 'top_p': top_p, - 'cfg_coef': cfg_coef, - 'two_step_cfg': two_step_cfg, - } - - def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None): - """Override the default progress callback.""" - self._progress_callback = progress_callback - - def generate(self, descriptions: tp.List[str], progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on text. - - Args: - descriptions (list of str): A list of strings used as text conditioning. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) - assert prompt_tokens is None - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int, - descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None, - progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on audio prompts. - - Args: - prompt (torch.Tensor): A batch of waveforms used for continuation. - Prompt should be [B, C, T], or [C, T] if only one sample is generated. - prompt_sample_rate (int): Sampling rate of the given audio waveforms. - descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - if prompt.dim() == 2: - prompt = prompt[None] - if prompt.dim() != 3: - raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).") - prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels) - if descriptions is None: - descriptions = [None] * len(prompt) - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt) - assert prompt_tokens is not None - return self._generate_tokens(attributes, prompt_tokens, progress) - - @torch.no_grad() - def _prepare_tokens_and_attributes( - self, - descriptions: tp.Sequence[tp.Optional[str]], - prompt: tp.Optional[torch.Tensor], - ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]: - """Prepare model inputs. - - Args: - descriptions (list of str): A list of strings used as text conditioning. - prompt (torch.Tensor): A batch of waveforms used for continuation. - """ - attributes = [ - ConditioningAttributes(text={'description': description}) - for description in descriptions] - - if prompt is not None: - if descriptions is not None: - assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match" - prompt = prompt.to(self.device) - prompt_tokens, scale = self.compression_model.encode(prompt) - assert scale is None - else: - prompt_tokens = None - return attributes, prompt_tokens - - def _generate_tokens(self, attributes: tp.List[ConditioningAttributes], - prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor: - """Generate discrete audio tokens given audio prompt and/or conditions. - - Args: - attributes (list of ConditioningAttributes): Conditions used for generation (here text). - prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation. 
- progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - Returns: - torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params. - """ - i = 0 - prompt_list = attributes[0].text['description'] - total_gen_len = int(self.duration * self.frame_rate) - max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate) - current_gen_offset: int = 0 - - def _progress_callback(generated_tokens: int, tokens_to_generate: int): - generated_tokens += current_gen_offset - if self._progress_callback is not None: - # Note that total_gen_len might be quite wrong depending on the - # codebook pattern used, but with delay it is almost accurate. - self._progress_callback(generated_tokens, total_gen_len) - else: - print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r') - - if prompt_tokens is not None: - assert max_prompt_len >= prompt_tokens.shape[-1], \ - "Prompt is longer than audio to generate" - - callback = None - if progress: - callback = _progress_callback - - if self.duration <= self.max_duration: - # generate by sampling from LM, simple case. - with self.autocast: - attributes[0].text['description'] = prompt_list[0] - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=total_gen_len, **self.generation_params) - - else: - all_tokens = [] - if prompt_tokens is None: - prompt_length = 0 - else: - all_tokens.append(prompt_tokens) - prompt_length = prompt_tokens.shape[-1] - - stride_tokens = int(self.frame_rate * self.extend_stride) - - while current_gen_offset + prompt_length < total_gen_len: - time_offset = current_gen_offset / self.frame_rate - chunk_duration = min(self.duration - time_offset, self.max_duration) - max_gen_len = int(chunk_duration * self.frame_rate) - with self.autocast: - if i >= len(prompt_list): - i = len(prompt_list) - 1 - attributes[0].text['description'] = prompt_list[i] - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=max_gen_len, **self.generation_params) - i = i + 1 - if prompt_tokens is None: - all_tokens.append(gen_tokens) - else: - all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:]) - prompt_tokens = gen_tokens[:, :, stride_tokens:] - prompt_length = prompt_tokens.shape[-1] - current_gen_offset += stride_tokens - - gen_tokens = torch.cat(all_tokens, dim=-1) - - # generate audio - assert gen_tokens.dim() == 3 - with torch.no_grad(): - gen_audio = self.compression_model.decode(gen_tokens, None) - return gen_audio - - def to(self, device: str): - self.compression_model.to(device) - self.lm.to(device) - return self \ No newline at end of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/formdata.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/formdata.py deleted file mode 100644 index e7cd24ca9f7afb2bd31f1c653d9e15acb4fedc8b..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/formdata.py +++ /dev/null @@ -1,172 +0,0 @@ -import io -from typing import Any, Iterable, List, Optional -from urllib.parse import urlencode - -from multidict import MultiDict, MultiDictProxy - -from . import hdrs, multipart, payload -from .helpers import guess_filename -from .payload import Payload - -__all__ = ("FormData",) - - -class FormData: - """Helper class for form body generation. - - Supports multipart/form-data and application/x-www-form-urlencoded. 
- """ - - def __init__( - self, - fields: Iterable[Any] = (), - quote_fields: bool = True, - charset: Optional[str] = None, - ) -> None: - self._writer = multipart.MultipartWriter("form-data") - self._fields: List[Any] = [] - self._is_multipart = False - self._is_processed = False - self._quote_fields = quote_fields - self._charset = charset - - if isinstance(fields, dict): - fields = list(fields.items()) - elif not isinstance(fields, (list, tuple)): - fields = (fields,) - self.add_fields(*fields) - - @property - def is_multipart(self) -> bool: - return self._is_multipart - - def add_field( - self, - name: str, - value: Any, - *, - content_type: Optional[str] = None, - filename: Optional[str] = None, - content_transfer_encoding: Optional[str] = None, - ) -> None: - - if isinstance(value, io.IOBase): - self._is_multipart = True - elif isinstance(value, (bytes, bytearray, memoryview)): - if filename is None and content_transfer_encoding is None: - filename = name - - type_options: MultiDict[str] = MultiDict({"name": name}) - if filename is not None and not isinstance(filename, str): - raise TypeError( - "filename must be an instance of str. " "Got: %s" % filename - ) - if filename is None and isinstance(value, io.IOBase): - filename = guess_filename(value, name) - if filename is not None: - type_options["filename"] = filename - self._is_multipart = True - - headers = {} - if content_type is not None: - if not isinstance(content_type, str): - raise TypeError( - "content_type must be an instance of str. " "Got: %s" % content_type - ) - headers[hdrs.CONTENT_TYPE] = content_type - self._is_multipart = True - if content_transfer_encoding is not None: - if not isinstance(content_transfer_encoding, str): - raise TypeError( - "content_transfer_encoding must be an instance" - " of str. 
Got: %s" % content_transfer_encoding - ) - headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding - self._is_multipart = True - - self._fields.append((type_options, headers, value)) - - def add_fields(self, *fields: Any) -> None: - to_add = list(fields) - - while to_add: - rec = to_add.pop(0) - - if isinstance(rec, io.IOBase): - k = guess_filename(rec, "unknown") - self.add_field(k, rec) # type: ignore[arg-type] - - elif isinstance(rec, (MultiDictProxy, MultiDict)): - to_add.extend(rec.items()) - - elif isinstance(rec, (list, tuple)) and len(rec) == 2: - k, fp = rec - self.add_field(k, fp) # type: ignore[arg-type] - - else: - raise TypeError( - "Only io.IOBase, multidict and (name, file) " - "pairs allowed, use .add_field() for passing " - "more complex parameters, got {!r}".format(rec) - ) - - def _gen_form_urlencoded(self) -> payload.BytesPayload: - # form data (x-www-form-urlencoded) - data = [] - for type_options, _, value in self._fields: - data.append((type_options["name"], value)) - - charset = self._charset if self._charset is not None else "utf-8" - - if charset == "utf-8": - content_type = "application/x-www-form-urlencoded" - else: - content_type = "application/x-www-form-urlencoded; " "charset=%s" % charset - - return payload.BytesPayload( - urlencode(data, doseq=True, encoding=charset).encode(), - content_type=content_type, - ) - - def _gen_form_data(self) -> multipart.MultipartWriter: - """Encode a list of fields using the multipart/form-data MIME format""" - if self._is_processed: - raise RuntimeError("Form data has been processed already") - for dispparams, headers, value in self._fields: - try: - if hdrs.CONTENT_TYPE in headers: - part = payload.get_payload( - value, - content_type=headers[hdrs.CONTENT_TYPE], - headers=headers, - encoding=self._charset, - ) - else: - part = payload.get_payload( - value, headers=headers, encoding=self._charset - ) - except Exception as exc: - raise TypeError( - "Can not serialize value type: %r\n " - "headers: %r\n value: %r" % (type(value), headers, value) - ) from exc - - if dispparams: - part.set_content_disposition( - "form-data", quote_fields=self._quote_fields, **dispparams - ) - # FIXME cgi.FieldStorage doesn't likes body parts with - # Content-Length which were sent via chunked transfer encoding - assert part.headers is not None - part.headers.popall(hdrs.CONTENT_LENGTH, None) - - self._writer.append_payload(part) - - self._is_processed = True - return self._writer - - def __call__(self) -> Payload: - if self._is_multipart: - return self._gen_form_data() - else: - return self._gen_form_urlencoded() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/colorama/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/colorama/__init__.py deleted file mode 100644 index 383101cdb38706c305449674044e9288b92b7d75..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/colorama/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
-from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console -from .ansi import Fore, Back, Style, Cursor -from .ansitowin32 import AnsiToWin32 - -__version__ = '0.4.6' - diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_execfile.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_execfile.py deleted file mode 100644 index 28ae403512042417b13059b059018dad19a82ed1..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_execfile.py +++ /dev/null @@ -1,14 +0,0 @@ -# We must redefine it in Py3k if it's not already there -def execfile(file, glob=None, loc=None): - if glob is None: - import sys - glob = sys._getframe().f_back.f_globals - if loc is None: - loc = glob - - import tokenize - with tokenize.open(file) as stream: - contents = stream.read() - - # execute the script (note: it's important to compile first to have the filename set in debug mode) - exec(compile(contents + "\n", file, 'exec'), glob, loc) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/core/seg/__init__.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/core/seg/__init__.py deleted file mode 100644 index 93bc129b685e4a3efca2cc891729981b2865900d..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/core/seg/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .builder import build_pixel_sampler -from .sampler import BasePixelSampler, OHEMPixelSampler - -__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] diff --git a/spaces/TRI-ML/risk_biased_prediction/risk_biased/__init__.py b/spaces/TRI-ML/risk_biased_prediction/risk_biased/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/spinners.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/spinners.py deleted file mode 100644 index cf2b976f377c2656afb3d84add8d30b0fc280c03..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/spinners.py +++ /dev/null @@ -1,159 +0,0 @@ -import contextlib -import itertools -import logging -import sys -import time -from typing import IO, Generator, Optional - -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.logging import get_indentation - -logger = logging.getLogger(__name__) - - -class SpinnerInterface: - def spin(self) -> None: - raise NotImplementedError() - - def finish(self, final_status: str) -> None: - raise NotImplementedError() - - -class InteractiveSpinner(SpinnerInterface): - def __init__( - self, - message: str, - file: Optional[IO[str]] = None, - spin_chars: str = "-\\|/", - # Empirically, 8 updates/second looks nice - min_update_interval_seconds: float = 0.125, - ): - self._message = message - if file is None: - file = sys.stdout - self._file = file - self._rate_limiter = RateLimiter(min_update_interval_seconds) - self._finished = False - - self._spin_cycle = itertools.cycle(spin_chars) - - self._file.write(" " * get_indentation() + self._message + " ... 
") - self._width = 0 - - def _write(self, status: str) -> None: - assert not self._finished - # Erase what we wrote before by backspacing to the beginning, writing - # spaces to overwrite the old text, and then backspacing again - backup = "\b" * self._width - self._file.write(backup + " " * self._width + backup) - # Now we have a blank slate to add our status - self._file.write(status) - self._width = len(status) - self._file.flush() - self._rate_limiter.reset() - - def spin(self) -> None: - if self._finished: - return - if not self._rate_limiter.ready(): - return - self._write(next(self._spin_cycle)) - - def finish(self, final_status: str) -> None: - if self._finished: - return - self._write(final_status) - self._file.write("\n") - self._file.flush() - self._finished = True - - -# Used for dumb terminals, non-interactive installs (no tty), etc. -# We still print updates occasionally (once every 60 seconds by default) to -# act as a keep-alive for systems like Travis-CI that take lack-of-output as -# an indication that a task has frozen. -class NonInteractiveSpinner(SpinnerInterface): - def __init__(self, message: str, min_update_interval_seconds: float = 60.0) -> None: - self._message = message - self._finished = False - self._rate_limiter = RateLimiter(min_update_interval_seconds) - self._update("started") - - def _update(self, status: str) -> None: - assert not self._finished - self._rate_limiter.reset() - logger.info("%s: %s", self._message, status) - - def spin(self) -> None: - if self._finished: - return - if not self._rate_limiter.ready(): - return - self._update("still running...") - - def finish(self, final_status: str) -> None: - if self._finished: - return - self._update(f"finished with status '{final_status}'") - self._finished = True - - -class RateLimiter: - def __init__(self, min_update_interval_seconds: float) -> None: - self._min_update_interval_seconds = min_update_interval_seconds - self._last_update: float = 0 - - def ready(self) -> bool: - now = time.time() - delta = now - self._last_update - return delta >= self._min_update_interval_seconds - - def reset(self) -> None: - self._last_update = time.time() - - -@contextlib.contextmanager -def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]: - # Interactive spinner goes directly to sys.stdout rather than being routed - # through the logging system, but it acts like it has level INFO, - # i.e. it's only displayed if we're at level INFO or better. - # Non-interactive spinner goes through the logging system, so it is always - # in sync with logging configuration. - if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO: - spinner: SpinnerInterface = InteractiveSpinner(message) - else: - spinner = NonInteractiveSpinner(message) - try: - with hidden_cursor(sys.stdout): - yield spinner - except KeyboardInterrupt: - spinner.finish("canceled") - raise - except Exception: - spinner.finish("error") - raise - else: - spinner.finish("done") - - -HIDE_CURSOR = "\x1b[?25l" -SHOW_CURSOR = "\x1b[?25h" - - -@contextlib.contextmanager -def hidden_cursor(file: IO[str]) -> Generator[None, None, None]: - # The Windows terminal does not support the hide/show cursor ANSI codes, - # even via colorama. So don't even try. - if WINDOWS: - yield - # We don't want to clutter the output with control characters if we're - # writing to a file, or if the user is running with --quiet. 
- # See https://github.com/pypa/pip/issues/3418 - elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO: - yield - else: - file.write(HIDE_CURSOR) - try: - yield - finally: - file.write(SHOW_CURSOR) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/color_triplet.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/color_triplet.py deleted file mode 100644 index 02cab328251af9bfa809981aaa44933c407e2cd7..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/color_triplet.py +++ /dev/null @@ -1,38 +0,0 @@ -from typing import NamedTuple, Tuple - - -class ColorTriplet(NamedTuple): - """The red, green, and blue components of a color.""" - - red: int - """Red component in 0 to 255 range.""" - green: int - """Green component in 0 to 255 range.""" - blue: int - """Blue component in 0 to 255 range.""" - - @property - def hex(self) -> str: - """get the color triplet in CSS style.""" - red, green, blue = self - return f"#{red:02x}{green:02x}{blue:02x}" - - @property - def rgb(self) -> str: - """The color in RGB format. - - Returns: - str: An rgb color, e.g. ``"rgb(100,23,255)"``. - """ - red, green, blue = self - return f"rgb({red},{green},{blue})" - - @property - def normalized(self) -> Tuple[float, float, float]: - """Convert components into floats between 0 and 1. - - Returns: - Tuple[float, float, float]: A tuple of three normalized colour components. - """ - red, green, blue = self - return red / 255.0, green / 255.0, blue / 255.0 diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/config/defaults.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/config/defaults.py deleted file mode 100644 index 848486dfe91a62559e6ae35120a4dac26d4bd66d..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/config/defaults.py +++ /dev/null @@ -1,635 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .config import CfgNode as CN - -# NOTE: given the new config system -# (https://detectron2.readthedocs.io/en/latest/tutorials/lazyconfigs.html), -# we will stop adding new functionalities to default CfgNode. - -# ----------------------------------------------------------------------------- -# Convention about Training / Test specific parameters -# ----------------------------------------------------------------------------- -# Whenever an argument can be either used for training or for testing, the -# corresponding name will be post-fixed by a _TRAIN for a training parameter, -# or _TEST for a test-specific parameter. -# For example, the number of images during training will be -# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be -# IMAGES_PER_BATCH_TEST - -# ----------------------------------------------------------------------------- -# Config definition -# ----------------------------------------------------------------------------- - -_C = CN() - -# The version number, to upgrade from old configs to new ones if any -# changes happen. It's recommended to keep a VERSION in your config file. -_C.VERSION = 2 - -_C.MODEL = CN() -_C.MODEL.LOAD_PROPOSALS = False -_C.MODEL.MASK_ON = False -_C.MODEL.KEYPOINT_ON = False -_C.MODEL.DEVICE = "cuda" -_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" - -# Path (a file path, or URL like detectron2://.., https://..) 
to a checkpoint file -# to be loaded to the model. You can find available models in the model zoo. -_C.MODEL.WEIGHTS = "" - -# Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR). -# To train on images of different number of channels, just set different mean & std. -# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675] -_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675] -# When using pre-trained models in Detectron1 or any MSRA models, -# std has been absorbed into its conv1 weights, so the std needs to be set 1. -# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) -_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0] - - -# ----------------------------------------------------------------------------- -# INPUT -# ----------------------------------------------------------------------------- -_C.INPUT = CN() -# By default, {MIN,MAX}_SIZE options are used in transforms.ResizeShortestEdge. -# Please refer to ResizeShortestEdge for detailed definition. -# Size of the smallest side of the image during training -_C.INPUT.MIN_SIZE_TRAIN = (800,) -# Sample size of smallest side by choice or random selection from range give by -# INPUT.MIN_SIZE_TRAIN -_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice" -# Maximum size of the side of the image during training -_C.INPUT.MAX_SIZE_TRAIN = 1333 -# Size of the smallest side of the image during testing. Set to zero to disable resize in testing. -_C.INPUT.MIN_SIZE_TEST = 800 -# Maximum size of the side of the image during testing -_C.INPUT.MAX_SIZE_TEST = 1333 -# Mode for flipping images used in data augmentation during training -# choose one of ["horizontal, "vertical", "none"] -_C.INPUT.RANDOM_FLIP = "horizontal" - -# `True` if cropping is used for data augmentation during training -_C.INPUT.CROP = CN({"ENABLED": False}) -# Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation. -_C.INPUT.CROP.TYPE = "relative_range" -# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of -# pixels if CROP.TYPE is "absolute" -_C.INPUT.CROP.SIZE = [0.9, 0.9] - - -# Whether the model needs RGB, YUV, HSV etc. -# Should be one of the modes defined here, as we use PIL to read the image: -# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes -# with BGR being the one exception. One can set image format to BGR, we will -# internally use RGB for conversion and flip the channels over -_C.INPUT.FORMAT = "BGR" -# The ground truth mask format that the model will use. -# Mask R-CNN supports either "polygon" or "bitmask" as ground truth. -_C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask" - - -# ----------------------------------------------------------------------------- -# Dataset -# ----------------------------------------------------------------------------- -_C.DATASETS = CN() -# List of the dataset names for training. Must be registered in DatasetCatalog -# Samples from these datasets will be merged and used as one dataset. -_C.DATASETS.TRAIN = () -# List of the pre-computed proposal files for training, which must be consistent -# with datasets listed in DATASETS.TRAIN. -_C.DATASETS.PROPOSAL_FILES_TRAIN = () -# Number of top scoring precomputed proposals to keep for training -_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000 -# List of the dataset names for testing. 
Must be registered in DatasetCatalog -_C.DATASETS.TEST = () -# List of the pre-computed proposal files for test, which must be consistent -# with datasets listed in DATASETS.TEST. -_C.DATASETS.PROPOSAL_FILES_TEST = () -# Number of top scoring precomputed proposals to keep for test -_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000 - -# ----------------------------------------------------------------------------- -# DataLoader -# ----------------------------------------------------------------------------- -_C.DATALOADER = CN() -# Number of data loading threads -_C.DATALOADER.NUM_WORKERS = 4 -# If True, each batch should contain only images for which the aspect ratio -# is compatible. This groups portrait images together, and landscape images -# are not batched with portrait images. -_C.DATALOADER.ASPECT_RATIO_GROUPING = True -# Options: TrainingSampler, RepeatFactorTrainingSampler -_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler" -# Repeat threshold for RepeatFactorTrainingSampler -_C.DATALOADER.REPEAT_THRESHOLD = 0.0 -# Tf True, when working on datasets that have instance annotations, the -# training dataloader will filter out images without associated annotations -_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True - -# ---------------------------------------------------------------------------- # -# Backbone options -# ---------------------------------------------------------------------------- # -_C.MODEL.BACKBONE = CN() - -_C.MODEL.BACKBONE.NAME = "build_resnet_backbone" -# Freeze the first several stages so they are not trained. -# There are 5 stages in ResNet. The first is a convolution, and the following -# stages are each group of residual blocks. -_C.MODEL.BACKBONE.FREEZE_AT = 2 - - -# ---------------------------------------------------------------------------- # -# FPN options -# ---------------------------------------------------------------------------- # -_C.MODEL.FPN = CN() -# Names of the input feature maps to be used by FPN -# They must have contiguous power of 2 strides -# e.g., ["res2", "res3", "res4", "res5"] -_C.MODEL.FPN.IN_FEATURES = [] -_C.MODEL.FPN.OUT_CHANNELS = 256 - -# Options: "" (no norm), "GN" -_C.MODEL.FPN.NORM = "" - -# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg" -_C.MODEL.FPN.FUSE_TYPE = "sum" - - -# ---------------------------------------------------------------------------- # -# Proposal generator options -# ---------------------------------------------------------------------------- # -_C.MODEL.PROPOSAL_GENERATOR = CN() -# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals" -_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" -# Proposal height and width both need to be greater than MIN_SIZE -# (a the scale used during training or inference) -_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0 - - -# ---------------------------------------------------------------------------- # -# Anchor generator options -# ---------------------------------------------------------------------------- # -_C.MODEL.ANCHOR_GENERATOR = CN() -# The generator can be any name in the ANCHOR_GENERATOR registry -_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" -# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input. -# Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for -# IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1. -# When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES. -_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]] -# Anchor aspect ratios. 
For each area given in `SIZES`, anchors with different aspect -# ratios are generated by an anchor generator. -# Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W) -# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true, -# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used -# for all IN_FEATURES. -_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] -# Anchor angles. -# list[list[float]], the angle in degrees, for each input feature map. -# ANGLES[i] specifies the list of angles for IN_FEATURES[i]. -_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]] -# Relative offset between the center of the first anchor and the top-left corner of the image -# Value has to be in [0, 1). Recommend to use 0.5, which means half stride. -# The value is not expected to affect model accuracy. -_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0 - -# ---------------------------------------------------------------------------- # -# RPN options -# ---------------------------------------------------------------------------- # -_C.MODEL.RPN = CN() -_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY - -# Names of the input feature maps to be used by RPN -# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN -_C.MODEL.RPN.IN_FEATURES = ["res4"] -# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels -# Set to -1 or a large value, e.g. 100000, to disable pruning anchors -_C.MODEL.RPN.BOUNDARY_THRESH = -1 -# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD] -# Minimum overlap required between an anchor and ground-truth box for the -# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD -# ==> positive RPN example: 1) -# Maximum overlap allowed between an anchor and ground-truth box for the -# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD -# ==> negative RPN example: 0) -# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD) -# are ignored (-1) -_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7] -_C.MODEL.RPN.IOU_LABELS = [0, -1, 1] -# Number of regions per image used to train RPN -_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 -# Target fraction of foreground (positive) examples per RPN minibatch -_C.MODEL.RPN.POSITIVE_FRACTION = 0.5 -# Options are: "smooth_l1", "giou", "diou", "ciou" -_C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1" -_C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0 -# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets -_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) -# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. -_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0 -_C.MODEL.RPN.LOSS_WEIGHT = 1.0 -# Number of top scoring RPN proposals to keep before applying NMS -# When FPN is used, this is *per FPN level* (not total) -_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000 -_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000 -# Number of top scoring RPN proposals to keep after applying NMS -# When FPN is used, this limit is applied per level and then again to the union -# of proposals from all levels -# NOTE: When FPN is used, the meaning of this config is different from Detectron1. -# It means per-batch topk in Detectron1, but per-image topk here. -# See the "find_top_rpn_proposals" function for details. 
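(Aside: every key in this defaults file is reachable through detectron2's CfgNode API, so a short sketch of how values such as the RPN proposal limits are typically overridden at run time may help; the POST_NMS_TOPK settings described by the comment above continue immediately after it. The YAML file name is hypothetical.)

```python
from detectron2.config import get_cfg

cfg = get_cfg()  # returns a clone of the _C defaults defined in this file
# cfg.merge_from_file("configs/my_experiment.yaml")  # typical, if a YAML override file exists
cfg.merge_from_list(["MODEL.RPN.POST_NMS_TOPK_TEST", "500",
                     "SOLVER.BASE_LR", "0.005"])     # command-line style key/value overrides
cfg.freeze()
print(cfg.MODEL.RPN.POST_NMS_TOPK_TEST)              # -> 500
```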
-_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 -_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 -# NMS threshold used on RPN proposals -_C.MODEL.RPN.NMS_THRESH = 0.7 -# Set this to -1 to use the same number of output channels as input channels. -_C.MODEL.RPN.CONV_DIMS = [-1] - -# ---------------------------------------------------------------------------- # -# ROI HEADS options -# ---------------------------------------------------------------------------- # -_C.MODEL.ROI_HEADS = CN() -_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads" -# Number of foreground classes -_C.MODEL.ROI_HEADS.NUM_CLASSES = 80 -# Names of the input feature maps to be used by ROI heads -# Currently all heads (box, mask, ...) use the same input feature map list -# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN -_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"] -# IOU overlap ratios [IOU_THRESHOLD] -# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD) -# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD) -_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5] -_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1] -# RoI minibatch size *per image* (number of regions of interest [ROIs]) during training -# Total number of RoIs per training minibatch = -# ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH -# E.g., a common configuration is: 512 * 16 = 8192 -_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 -# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0) -_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 - -# Only used on test mode - -# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to -# balance obtaining high recall with not having too many low precision -# detections that will slow down inference post processing steps (like NMS) -# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down -# inference. -_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 -# Overlap threshold used for non-maximum suppression (suppress boxes with -# IoU >= this threshold) -_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 -# If True, augment proposals with ground-truth boxes before sampling proposals to -# train ROI heads. -_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True - -# ---------------------------------------------------------------------------- # -# Box Head -# ---------------------------------------------------------------------------- # -_C.MODEL.ROI_BOX_HEAD = CN() -# C4 don't use head name option -# Options for non-C4 models: FastRCNNConvFCHead, -_C.MODEL.ROI_BOX_HEAD.NAME = "" -# Options are: "smooth_l1", "giou", "diou", "ciou" -_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1" -# The final scaling coefficient on the box regression loss, used to balance the magnitude of its -# gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`. -_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0 -# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets -# These are empirically chosen to approximately lead to unit variance targets -_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0) -# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. 
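(Aside on the smooth-L1 "transition point" mentioned in the comment above; the ROI_BOX_HEAD.SMOOTH_L1_BETA option it refers to follows right after this sketch. A minimal reference implementation of the loss, matching the commonly used definition, quadratic below beta and linear above, with beta = 0 degenerating to plain L1:)

```python
def smooth_l1(x: float, beta: float) -> float:
    """Quadratic for |x| < beta, linear otherwise; beta = 0 gives plain L1."""
    ax = abs(x)
    if beta == 0.0 or ax >= beta:
        return ax - 0.5 * beta
    return 0.5 * ax * ax / beta

print(smooth_l1(0.05, 0.1), smooth_l1(2.0, 0.1))  # 0.0125 and 1.95
```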
-_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0 -_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 -_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 -# Type of pooling operation applied to the incoming feature map for each RoI -_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" - -_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0 -# Hidden layer dimension for FC layers in the RoI box head -_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024 -_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0 -# Channel dimension for Conv layers in the RoI box head -_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256 -# Normalization method for the convolution layers. -# Options: "" (no norm), "GN", "SyncBN". -_C.MODEL.ROI_BOX_HEAD.NORM = "" -# Whether to use class agnostic for bbox regression -_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False -# If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes. -_C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False - -# ---------------------------------------------------------------------------- # -# Cascaded Box Head -# ---------------------------------------------------------------------------- # -_C.MODEL.ROI_BOX_CASCADE_HEAD = CN() -# The number of cascade stages is implicitly defined by the length of the following two configs. -_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = ( - (10.0, 10.0, 5.0, 5.0), - (20.0, 20.0, 10.0, 10.0), - (30.0, 30.0, 15.0, 15.0), -) -_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7) - - -# ---------------------------------------------------------------------------- # -# Mask Head -# ---------------------------------------------------------------------------- # -_C.MODEL.ROI_MASK_HEAD = CN() -_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead" -_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14 -_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0 -_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head -_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256 -# Normalization method for the convolution layers. -# Options: "" (no norm), "GN", "SyncBN". -_C.MODEL.ROI_MASK_HEAD.NORM = "" -# Whether to use class agnostic for mask prediction -_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False -# Type of pooling operation applied to the incoming feature map for each RoI -_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2" - - -# ---------------------------------------------------------------------------- # -# Keypoint Head -# ---------------------------------------------------------------------------- # -_C.MODEL.ROI_KEYPOINT_HEAD = CN() -_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead" -_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14 -_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0 -_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8)) -_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO. - -# Images with too few (or no) keypoints are excluded from training. -_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1 -# Normalize by the total number of visible keypoints in the minibatch if True. -# Otherwise, normalize by the total number of keypoints that could ever exist -# in the minibatch. -# The keypoint softmax loss is only calculated on visible keypoints. -# Since the number of visible keypoints can vary significantly between -# minibatches, this has the effect of up-weighting the importance of -# minibatches with few visible keypoints. 
(Imagine the extreme case of -# only one visible keypoint versus N: in the case of N, each one -# contributes 1/N to the gradient compared to the single keypoint -# determining the gradient direction). Instead, we can normalize the -# loss by the total number of keypoints, if it were the case that all -# keypoints were visible in a full minibatch. (Returning to the example, -# this means that the one visible keypoint contributes as much as each -# of the N keypoints.) -_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True -# Multi-task loss weight to use for keypoints -# Recommended values: -# - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True -# - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False -_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0 -# Type of pooling operation applied to the incoming feature map for each RoI -_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2" - -# ---------------------------------------------------------------------------- # -# Semantic Segmentation Head -# ---------------------------------------------------------------------------- # -_C.MODEL.SEM_SEG_HEAD = CN() -_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead" -_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"] -# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for -# the correposnding pixel. -_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255 -# Number of classes in the semantic segmentation head -_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54 -# Number of channels in the 3x3 convs inside semantic-FPN heads. -_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128 -# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride. -_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4 -# Normalization method for the convolution layers. Options: "" (no norm), "GN". -_C.MODEL.SEM_SEG_HEAD.NORM = "GN" -_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0 - -_C.MODEL.PANOPTIC_FPN = CN() -# Scaling of all losses from instance detection / segmentation head. -_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0 - -# options when combining instance & semantic segmentation outputs -_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) # "COMBINE.ENABLED" is deprecated & not used -_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5 -_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096 -_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5 - - -# ---------------------------------------------------------------------------- # -# RetinaNet Head -# ---------------------------------------------------------------------------- # -_C.MODEL.RETINANET = CN() - -# This is the number of foreground classes. -_C.MODEL.RETINANET.NUM_CLASSES = 80 - -_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"] - -# Convolutions to use in the cls and bbox tower -# NOTE: this doesn't include the last conv for logits -_C.MODEL.RETINANET.NUM_CONVS = 4 - -# IoU overlap ratio [bg, fg] for labeling anchors. -# Anchors with < bg are labeled negative (0) -# Anchors with >= bg and < fg are ignored (-1) -# Anchors with >= fg are labeled positive (1) -_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5] -_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1] - -# Prior prob for rare case (i.e. foreground) at the beginning of training. -# This is used to set the bias for the logits layer of the classifier subnet. -# This improves training stability in the case of heavy class imbalance. 
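(Aside on how the prior probability above is used; the RETINANET.PRIOR_PROB option itself follows right after this note. RetinaNet initializes the classification-head bias so that the initial foreground probability equals the prior, b = -log((1 - pi) / pi), which keeps training from being swamped by easy negatives on the first iterations. A small sketch with illustrative layer sizes:)

```python
import math
import torch

prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)   # ~ -4.595
# 80 classes x 9 anchors per location; the channel counts here are illustrative only
cls_logits = torch.nn.Conv2d(256, 80 * 9, kernel_size=3, padding=1)
torch.nn.init.constant_(cls_logits.bias, bias_value)
# sigmoid(bias_value) == prior_prob, so every anchor starts out predicting ~1% foreground
```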
-_C.MODEL.RETINANET.PRIOR_PROB = 0.01 - -# Inference cls score threshold, only anchors with score > INFERENCE_TH are -# considered for inference (to improve speed) -_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05 -# Select topk candidates before NMS -_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000 -_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5 - -# Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets -_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) - -# Loss parameters -_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0 -_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25 -_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1 -# Options are: "smooth_l1", "giou", "diou", "ciou" -_C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1" - -# One of BN, SyncBN, FrozenBN, GN -# Only supports GN until unshared norm is implemented -_C.MODEL.RETINANET.NORM = "" - - -# ---------------------------------------------------------------------------- # -# ResNe[X]t options (ResNets = {ResNet, ResNeXt} -# Note that parts of a resnet may be used for both the backbone and the head -# These options apply to both -# ---------------------------------------------------------------------------- # -_C.MODEL.RESNETS = CN() - -_C.MODEL.RESNETS.DEPTH = 50 -_C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone - -# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt -_C.MODEL.RESNETS.NUM_GROUPS = 1 - -# Options: FrozenBN, GN, "SyncBN", "BN" -_C.MODEL.RESNETS.NORM = "FrozenBN" - -# Baseline width of each group. -# Scaling this parameters will scale the width of all bottleneck layers. -_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64 - -# Place the stride 2 conv on the 1x1 filter -# Use True only for the original MSRA ResNet; use False for C2 and Torch models -_C.MODEL.RESNETS.STRIDE_IN_1X1 = True - -# Apply dilation in stage "res5" -_C.MODEL.RESNETS.RES5_DILATION = 1 - -# Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet -# For R18 and R34, this needs to be set to 64 -_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 -_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 - -# Apply Deformable Convolution in stages -# Specify if apply deform_conv on Res2, Res3, Res4, Res5 -_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False] -# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168); -# Use False for DeformableV1. -_C.MODEL.RESNETS.DEFORM_MODULATED = False -# Number of groups in deformable conv. -_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1 - - -# ---------------------------------------------------------------------------- # -# Solver -# ---------------------------------------------------------------------------- # -_C.SOLVER = CN() - -# Options: WarmupMultiStepLR, WarmupCosineLR. -# See detectron2/solver/build.py for definition. -_C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR" - -_C.SOLVER.MAX_ITER = 40000 - -_C.SOLVER.BASE_LR = 0.001 - -_C.SOLVER.MOMENTUM = 0.9 - -_C.SOLVER.NESTEROV = False - -_C.SOLVER.WEIGHT_DECAY = 0.0001 -# The weight decay that's applied to parameters of normalization layers -# (typically the affine transformation) -_C.SOLVER.WEIGHT_DECAY_NORM = 0.0 - -_C.SOLVER.GAMMA = 0.1 -# The iteration number to decrease learning rate by GAMMA. 
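(Aside on how GAMMA, STEPS and the WARMUP_* options combine under WarmupMultiStepLR; the SOLVER.STEPS tuple that the comment above refers to follows right after this sketch. This is a rough, self-contained re-implementation of the schedule for intuition only, not the library code itself.)

```python
def lr_at_iter(it, base_lr=0.001, gamma=0.1, steps=(30000,),
               warmup_iters=1000, warmup_factor=1.0 / 1000):
    # linear warmup from base_lr * warmup_factor up to base_lr over warmup_iters steps
    if it < warmup_iters:
        alpha = it / warmup_iters
        warmup = warmup_factor * (1 - alpha) + alpha
    else:
        warmup = 1.0
    # multiply by gamma once for every milestone already passed
    decay = gamma ** sum(1 for s in steps if it >= s)
    return base_lr * warmup * decay

print(lr_at_iter(0), lr_at_iter(1000), lr_at_iter(35000))  # 1e-06, 0.001, 0.0001
```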
-_C.SOLVER.STEPS = (30000,) - -_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000 -_C.SOLVER.WARMUP_ITERS = 1000 -_C.SOLVER.WARMUP_METHOD = "linear" - -# Save a checkpoint after every this number of iterations -_C.SOLVER.CHECKPOINT_PERIOD = 5000 - -# Number of images per batch across all machines. This is also the number -# of training images per step (i.e. per iteration). If we use 16 GPUs -# and IMS_PER_BATCH = 32, each GPU will see 2 images per batch. -# May be adjusted automatically if REFERENCE_WORLD_SIZE is set. -_C.SOLVER.IMS_PER_BATCH = 16 - -# The reference number of workers (GPUs) this config is meant to train with. -# It takes no effect when set to 0. -# With a non-zero value, it will be used by DefaultTrainer to compute a desired -# per-worker batch size, and then scale the other related configs (total batch size, -# learning rate, etc) to match the per-worker batch size. -# See documentation of `DefaultTrainer.auto_scale_workers` for details: -_C.SOLVER.REFERENCE_WORLD_SIZE = 0 - -# Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for -# biases. This is not useful (at least for recent models). You should avoid -# changing these and they exist only to reproduce Detectron v1 training if -# desired. -_C.SOLVER.BIAS_LR_FACTOR = 1.0 -_C.SOLVER.WEIGHT_DECAY_BIAS = None # None means following WEIGHT_DECAY - -# Gradient clipping -_C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False}) -# Type of gradient clipping, currently 2 values are supported: -# - "value": the absolute values of elements of each gradients are clipped -# - "norm": the norm of the gradient for each parameter is clipped thus -# affecting all elements in the parameter -_C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value" -# Maximum absolute value used for clipping gradients -_C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0 -# Floating point number p for L-p norm to be used with the "norm" -# gradient clipping type; for L-inf, please specify .inf -_C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0 - -# Enable automatic mixed precision for training -# Note that this does not change model's inference behavior. -# To use AMP in inference, run inference under autocast() -_C.SOLVER.AMP = CN({"ENABLED": False}) - -# ---------------------------------------------------------------------------- # -# Specific test options -# ---------------------------------------------------------------------------- # -_C.TEST = CN() -# For end-to-end tests to verify the expected accuracy. -# Each item is [task, metric, value, tolerance] -# e.g.: [['bbox', 'AP', 38.5, 0.2]] -_C.TEST.EXPECTED_RESULTS = [] -# The period (in terms of steps) to evaluate the model during training. -# Set to 0 to disable. -_C.TEST.EVAL_PERIOD = 0 -# The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval -# When empty, it will use the defaults in COCO. -# Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. -_C.TEST.KEYPOINT_OKS_SIGMAS = [] -# Maximum number of detections to return per image during inference (100 is -# based on the limit established for the COCO dataset). 
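(One more aside, on the SOLVER.CLIP_GRADIENTS block a little above; the TEST.DETECTIONS_PER_IMAGE value described by the comment immediately above resumes right after this sketch. In plain PyTorch the two CLIP_TYPE modes correspond roughly to the calls below; detectron2 applies the clipping inside its optimizer wrapper, so this is for intuition only.)

```python
import torch

model = torch.nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).sum()
loss.backward()

# CLIP_TYPE = "value": clamp every gradient element into [-CLIP_VALUE, CLIP_VALUE]
torch.nn.utils.clip_grad_value_(model.parameters(), clip_value=1.0)
# CLIP_TYPE = "norm": rescale gradients so their L-p norm does not exceed CLIP_VALUE
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0, norm_type=2.0)
```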
-_C.TEST.DETECTIONS_PER_IMAGE = 100 - -_C.TEST.AUG = CN({"ENABLED": False}) -_C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) -_C.TEST.AUG.MAX_SIZE = 4000 -_C.TEST.AUG.FLIP = True - -_C.TEST.PRECISE_BN = CN({"ENABLED": False}) -_C.TEST.PRECISE_BN.NUM_ITER = 200 - -# ---------------------------------------------------------------------------- # -# Misc options -# ---------------------------------------------------------------------------- # -# Directory where output files are written -_C.OUTPUT_DIR = "./output" -# Set seed to negative to fully randomize everything. -# Set seed to positive to use a fixed seed. Note that a fixed seed increases -# reproducibility but does not guarantee fully deterministic behavior. -# Disabling all parallelism further increases reproducibility. -_C.SEED = -1 -# Benchmark different cudnn algorithms. -# If input images have very different sizes, this option will have large overhead -# for about 10k iterations. It usually hurts total time, but can benefit for certain models. -# If input images have the same or similar sizes, benchmark is often helpful. -_C.CUDNN_BENCHMARK = False -# The period (in terms of steps) for minibatch visualization at train time. -# Set to 0 to disable. -_C.VIS_PERIOD = 0 - -# global config is for quick hack purposes. -# You can set them in command line or config files, -# and access it with: -# -# from detectron2.config import global_cfg -# print(global_cfg.HACK) -# -# Do not commit any configs into it. -_C.GLOBAL = CN() -_C.GLOBAL.HACK = 1.0 diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h deleted file mode 100644 index b54a5dde2ca11a74d29c4d8adb7fe1634f5baf9c..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#pragma once - -#include -#include - -#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 -// Designates functions callable from the host (CPU) and the device (GPU) -#define HOST_DEVICE __host__ __device__ -#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__ -#else -#include -#define HOST_DEVICE -#define HOST_DEVICE_INLINE HOST_DEVICE inline -#endif - -namespace detectron2 { - -namespace { - -template -struct RotatedBox { - T x_ctr, y_ctr, w, h, a; -}; - -template -struct Point { - T x, y; - HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {} - HOST_DEVICE_INLINE Point operator+(const Point& p) const { - return Point(x + p.x, y + p.y); - } - HOST_DEVICE_INLINE Point& operator+=(const Point& p) { - x += p.x; - y += p.y; - return *this; - } - HOST_DEVICE_INLINE Point operator-(const Point& p) const { - return Point(x - p.x, y - p.y); - } - HOST_DEVICE_INLINE Point operator*(const T coeff) const { - return Point(x * coeff, y * coeff); - } -}; - -template -HOST_DEVICE_INLINE T dot_2d(const Point& A, const Point& B) { - return A.x * B.x + A.y * B.y; -} - -// R: result type. 
can be different from input type -template -HOST_DEVICE_INLINE R cross_2d(const Point& A, const Point& B) { - return static_cast(A.x) * static_cast(B.y) - - static_cast(B.x) * static_cast(A.y); -} - -template -HOST_DEVICE_INLINE void get_rotated_vertices( - const RotatedBox& box, - Point (&pts)[4]) { - // M_PI / 180. == 0.01745329251 - double theta = box.a * 0.01745329251; - T cosTheta2 = (T)cos(theta) * 0.5f; - T sinTheta2 = (T)sin(theta) * 0.5f; - - // y: top --> down; x: left --> right - pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w; - pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w; - pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w; - pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w; - pts[2].x = 2 * box.x_ctr - pts[0].x; - pts[2].y = 2 * box.y_ctr - pts[0].y; - pts[3].x = 2 * box.x_ctr - pts[1].x; - pts[3].y = 2 * box.y_ctr - pts[1].y; -} - -template -HOST_DEVICE_INLINE int get_intersection_points( - const Point (&pts1)[4], - const Point (&pts2)[4], - Point (&intersections)[24]) { - // Line vector - // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1] - Point vec1[4], vec2[4]; - for (int i = 0; i < 4; i++) { - vec1[i] = pts1[(i + 1) % 4] - pts1[i]; - vec2[i] = pts2[(i + 1) % 4] - pts2[i]; - } - - // When computing the intersection area, it doesn't hurt if we have - // more (duplicated/approximate) intersections/vertices than needed, - // while it can cause drastic difference if we miss an intersection/vertex. - // Therefore, we add an epsilon to relax the comparisons between - // the float point numbers that decide the intersection points. - double EPS = 1e-5; - - // Line test - test all line combos for intersection - int num = 0; // number of intersections - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 4; j++) { - // Solve for 2x2 Ax=b - T det = cross_2d(vec2[j], vec1[i]); - - // This takes care of parallel lines - if (fabs(det) <= 1e-14) { - continue; - } - - auto vec12 = pts2[j] - pts1[i]; - - T t1 = cross_2d(vec2[j], vec12) / det; - T t2 = cross_2d(vec1[i], vec12) / det; - - if (t1 > -EPS && t1 < 1.0f + EPS && t2 > -EPS && t2 < 1.0f + EPS) { - intersections[num++] = pts1[i] + vec1[i] * t1; - } - } - } - - // Check for vertices of rect1 inside rect2 - { - const auto& AB = vec2[0]; - const auto& DA = vec2[3]; - auto ABdotAB = dot_2d(AB, AB); - auto ADdotAD = dot_2d(DA, DA); - for (int i = 0; i < 4; i++) { - // assume ABCD is the rectangle, and P is the point to be judged - // P is inside ABCD iff. 
P's projection on AB lies within AB - // and P's projection on AD lies within AD - - auto AP = pts1[i] - pts2[0]; - - auto APdotAB = dot_2d(AP, AB); - auto APdotAD = -dot_2d(AP, DA); - - if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) && - (APdotAD < ADdotAD + EPS)) { - intersections[num++] = pts1[i]; - } - } - } - - // Reverse the check - check for vertices of rect2 inside rect1 - { - const auto& AB = vec1[0]; - const auto& DA = vec1[3]; - auto ABdotAB = dot_2d(AB, AB); - auto ADdotAD = dot_2d(DA, DA); - for (int i = 0; i < 4; i++) { - auto AP = pts2[i] - pts1[0]; - - auto APdotAB = dot_2d(AP, AB); - auto APdotAD = -dot_2d(AP, DA); - - if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) && - (APdotAD < ADdotAD + EPS)) { - intersections[num++] = pts2[i]; - } - } - } - - return num; -} - -template -HOST_DEVICE_INLINE int convex_hull_graham( - const Point (&p)[24], - const int& num_in, - Point (&q)[24], - bool shift_to_zero = false) { - assert(num_in >= 2); - - // Step 1: - // Find point with minimum y - // if more than 1 points have the same minimum y, - // pick the one with the minimum x. - int t = 0; - for (int i = 1; i < num_in; i++) { - if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) { - t = i; - } - } - auto& start = p[t]; // starting point - - // Step 2: - // Subtract starting point from every points (for sorting in the next step) - for (int i = 0; i < num_in; i++) { - q[i] = p[i] - start; - } - - // Swap the starting point to position 0 - auto tmp = q[0]; - q[0] = q[t]; - q[t] = tmp; - - // Step 3: - // Sort point 1 ~ num_in according to their relative cross-product values - // (essentially sorting according to angles) - // If the angles are the same, sort according to their distance to origin - T dist[24]; -#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 - // compute distance to origin before sort, and sort them together with the - // points - for (int i = 0; i < num_in; i++) { - dist[i] = dot_2d(q[i], q[i]); - } - - // CUDA version - // In the future, we can potentially use thrust - // for sorting here to improve speed (though not guaranteed) - for (int i = 1; i < num_in - 1; i++) { - for (int j = i + 1; j < num_in; j++) { - T crossProduct = cross_2d(q[i], q[j]); - if ((crossProduct < -1e-6) || - (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { - auto q_tmp = q[i]; - q[i] = q[j]; - q[j] = q_tmp; - auto dist_tmp = dist[i]; - dist[i] = dist[j]; - dist[j] = dist_tmp; - } - } - } -#else - // CPU version - std::sort( - q + 1, q + num_in, [](const Point& A, const Point& B) -> bool { - T temp = cross_2d(A, B); - if (fabs(temp) < 1e-6) { - return dot_2d(A, A) < dot_2d(B, B); - } else { - return temp > 0; - } - }); - // compute distance to origin after sort, since the points are now different. - for (int i = 0; i < num_in; i++) { - dist[i] = dot_2d(q[i], q[i]); - } -#endif - - // Step 4: - // Make sure there are at least 2 points (that don't overlap with each other) - // in the stack - int k; // index of the non-overlapped second point - for (k = 1; k < num_in; k++) { - if (dist[k] > 1e-8) { - break; - } - } - if (k == num_in) { - // We reach the end, which means the convex hull is just one point - q[0] = p[t]; - return 1; - } - q[1] = q[k]; - int m = 2; // 2 points in the stack - // Step 5: - // Finally we can start the scanning process. 
- // When a non-convex relationship between the 3 points is found - // (either concave shape or duplicated points), - // we pop the previous point from the stack - // until the 3-point relationship is convex again, or - // until the stack only contains two points - for (int i = k + 1; i < num_in; i++) { - while (m > 1) { - auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2]; - // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) - - // q2.x*q1.y) So it may not return 0 even when q1==q2. Therefore we - // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means - // round to nearest floating point). - if (q1.x * q2.y >= q2.x * q1.y) - m--; - else - break; - } - // Using double also helps, but float can solve the issue for now. - // while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2]) - // >= 0) { - // m--; - // } - q[m++] = q[i]; - } - - // Step 6 (Optional): - // In general sense we need the original coordinates, so we - // need to shift the points back (reverting Step 2) - // But if we're only interested in getting the area/perimeter of the shape - // We can simply return. - if (!shift_to_zero) { - for (int i = 0; i < m; i++) { - q[i] += start; - } - } - - return m; -} - -template -HOST_DEVICE_INLINE T polygon_area(const Point (&q)[24], const int& m) { - if (m <= 2) { - return 0; - } - - T area = 0; - for (int i = 1; i < m - 1; i++) { - area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0])); - } - - return area / 2.0; -} - -template -HOST_DEVICE_INLINE T rotated_boxes_intersection( - const RotatedBox& box1, - const RotatedBox& box2) { - // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned - // from rotated_rect_intersection_pts - Point intersectPts[24], orderedPts[24]; - - Point pts1[4]; - Point pts2[4]; - get_rotated_vertices(box1, pts1); - get_rotated_vertices(box2, pts2); - - int num = get_intersection_points(pts1, pts2, intersectPts); - - if (num <= 2) { - return 0.0; - } - - // Convex Hull to order the intersection points in clockwise order and find - // the contour area. 
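(Aside: polygon_area above is a triangle-fan area sum around q[0], which equals the polygon area here because the points come out of the convex hull in order; a small Python check of the same computation, for intuition only. The call to convex_hull_graham that the comment above describes continues right after this sketch.)

```python
def polygon_area(pts):
    # fan triangulation around pts[0]; |cross product| of each triangle's edge vectors = 2x its area
    x0, y0 = pts[0]
    total = 0.0
    for (x1, y1), (x2, y2) in zip(pts[1:-1], pts[2:]):
        total += abs((x1 - x0) * (y2 - y0) - (x2 - x0) * (y1 - y0))
    return total / 2.0

print(polygon_area([(0, 0), (2, 0), (2, 1), (0, 1)]))  # 2.0 for a 2x1 rectangle
```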
- int num_convex = convex_hull_graham(intersectPts, num, orderedPts, true); - return polygon_area(orderedPts, num_convex); -} - -} // namespace - -template -HOST_DEVICE_INLINE T -single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) { - // shift center to the middle point to achieve higher precision in result - RotatedBox box1, box2; - auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0; - auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0; - box1.x_ctr = box1_raw[0] - center_shift_x; - box1.y_ctr = box1_raw[1] - center_shift_y; - box1.w = box1_raw[2]; - box1.h = box1_raw[3]; - box1.a = box1_raw[4]; - box2.x_ctr = box2_raw[0] - center_shift_x; - box2.y_ctr = box2_raw[1] - center_shift_y; - box2.w = box2_raw[2]; - box2.h = box2_raw[3]; - box2.a = box2_raw[4]; - - T area1 = box1.w * box1.h; - T area2 = box2.w * box2.h; - if (area1 < 1e-14 || area2 < 1e-14) { - return 0.f; - } - - T intersection = rotated_boxes_intersection(box1, box2); - T iou = intersection / (area1 + area2 - intersection); - return iou; -} - -} // namespace detectron2 diff --git a/spaces/ThirdEyeData/Customer-Conversion-Prediction/matumizi/mcsim.py b/spaces/ThirdEyeData/Customer-Conversion-Prediction/matumizi/mcsim.py deleted file mode 100644 index e136a80450d7eb8a8b7aa67eeafff3fdb06ec8ec..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Customer-Conversion-Prediction/matumizi/mcsim.py +++ /dev/null @@ -1,552 +0,0 @@ -#!/usr/local/bin/python3 - -# avenir-python: Machine Learning -# Author: Pranab Ghosh -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You may -# obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. 
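(Aside on the rotated-IoU routine that box_iou_rotated_utils.h implements above: box vertices, pairwise edge intersections, Graham-scan convex hull, triangle-fan area, then IoU. For intuition, the same quantity can be cross-checked in Python with shapely, assuming that library is available; the angle convention below is plain counter-clockwise and may differ in sign from detectron2's. The Monte Carlo simulator module deleted next resumes immediately below.)

```python
import math
from shapely.geometry import Polygon  # assumption: shapely is installed; it is not used by either deleted file

def rotated_box_polygon(x_ctr, y_ctr, w, h, angle_deg):
    t = math.radians(angle_deg)
    c, s = math.cos(t), math.sin(t)
    corners = [(-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2), (-w / 2, h / 2)]
    return Polygon([(x_ctr + c * dx - s * dy, y_ctr + s * dx + c * dy) for dx, dy in corners])

def rotated_iou(b1, b2):
    p1, p2 = rotated_box_polygon(*b1), rotated_box_polygon(*b2)
    inter = p1.intersection(p2).area
    return inter / (p1.area + p2.area - inter)

print(rotated_iou((0, 0, 4, 2, 0), (0, 0, 4, 2, 90)))  # ~ 0.333
```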
- -# Package imports -import os -import sys -import matplotlib.pyplot as plt -import numpy as np -import matplotlib -import random -import jprops -import statistics -from matplotlib import pyplot -from .util import * -from .mlutil import * -from .sampler import * - -class MonteCarloSimulator(object): - """ - monte carlo simulator for intergation, various statistic for complex fumctions - """ - def __init__(self, numIter, callback, logFilePath, logLevName): - """ - constructor - - Parameters - numIter :num of iterations - callback : call back method - logFilePath : log file path - logLevName : log level - """ - self.samplers = list() - self.numIter = numIter; - self.callback = callback - self.extraArgs = None - self.output = list() - self.sum = None - self.mean = None - self.sd = None - self.replSamplers = dict() - self.prSamples = None - - self.logger = None - if logFilePath is not None: - self.logger = createLogger(__name__, logFilePath, logLevName) - self.logger.info("******** stating new session of MonteCarloSimulator") - - - def registerBernoulliTrialSampler(self, pr): - """ - bernoulli trial sampler - - Parameters - pr : probability - """ - self.samplers.append(BernoulliTrialSampler(pr)) - - def registerPoissonSampler(self, rateOccur, maxSamp): - """ - poisson sampler - - Parameters - rateOccur : rate of occurence - maxSamp : max limit on no of samples - """ - self.samplers.append(PoissonSampler(rateOccur, maxSamp)) - - def registerUniformSampler(self, minv, maxv): - """ - uniform sampler - - Parameters - minv : min value - maxv : max value - """ - self.samplers.append(UniformNumericSampler(minv, maxv)) - - def registerTriangularSampler(self, min, max, vertexValue, vertexPos=None): - """ - triangular sampler - - Parameters - xmin : min value - xmax : max value - vertexValue : distr value at vertex - vertexPos : vertex pposition - """ - self.samplers.append(TriangularRejectSampler(min, max, vertexValue, vertexPos)) - - def registerGaussianSampler(self, mean, sd): - """ - gaussian sampler - - Parameters - mean : mean - sd : std deviation - """ - self.samplers.append(GaussianRejectSampler(mean, sd)) - - def registerNormalSampler(self, mean, sd): - """ - gaussian sampler using numpy - - Parameters - mean : mean - sd : std deviation - """ - self.samplers.append(NormalSampler(mean, sd)) - - def registerLogNormalSampler(self, mean, sd): - """ - log normal sampler using numpy - - Parameters - mean : mean - sd : std deviation - """ - self.samplers.append(LogNormalSampler(mean, sd)) - - def registerParetoSampler(self, mode, shape): - """ - pareto sampler using numpy - - Parameters - mode : mode - shape : shape - """ - self.samplers.append(ParetoSampler(mode, shape)) - - def registerGammaSampler(self, shape, scale): - """ - gamma sampler using numpy - - Parameters - shape : shape - scale : scale - """ - self.samplers.append(GammaSampler(shape, scale)) - - def registerDiscreteRejectSampler(self, xmin, xmax, step, *values): - """ - disccrete int sampler - - Parameters - xmin : min value - xmax : max value - step : discrete step - values : distr values - """ - self.samplers.append(DiscreteRejectSampler(xmin, xmax, step, *values)) - - def registerNonParametricSampler(self, minv, binWidth, *values): - """ - nonparametric sampler - - Parameters - xmin : min value - binWidth : bin width - values : distr values - """ - sampler = NonParamRejectSampler(minv, binWidth, *values) - sampler.sampleAsFloat() - self.samplers.append(sampler) - - def registerMultiVarNormalSampler(self, numVar, *values): - """ - 
multi var gaussian sampler using numpy - - Parameters - numVar : no of variables - values : numVar mean values followed by numVar x numVar values for covar matrix - """ - self.samplers.append(MultiVarNormalSampler(numVar, *values)) - - def registerJointNonParamRejectSampler(self, xmin, xbinWidth, xnbin, ymin, ybinWidth, ynbin, *values): - """ - joint nonparametric sampler - - Parameters - xmin : min value for x - xbinWidth : bin width for x - xnbin : no of bins for x - ymin : min value for y - ybinWidth : bin width for y - ynbin : no of bins for y - values : distr values - """ - self.samplers.append(JointNonParamRejectSampler(xmin, xbinWidth, xnbin, ymin, ybinWidth, ynbin, *values)) - - def registerRangePermutationSampler(self, minv, maxv, *numShuffles): - """ - permutation sampler with range - - Parameters - minv : min of range - maxv : max of range - numShuffles : no of shuffles or range of no of shuffles - """ - self.samplers.append(PermutationSampler.createSamplerWithRange(minv, maxv, *numShuffles)) - - def registerValuesPermutationSampler(self, values, *numShuffles): - """ - permutation sampler with values - - Parameters - values : list data - numShuffles : no of shuffles or range of no of shuffles - """ - self.samplers.append(PermutationSampler.createSamplerWithValues(values, *numShuffles)) - - def registerNormalSamplerWithTrendCycle(self, mean, stdDev, trend, cycle, step=1): - """ - normal sampler with trend and cycle - - Parameters - mean : mean - stdDev : std deviation - dmean : trend delta - cycle : cycle values wrt base mean - step : adjustment step for cycle and trend - """ - self.samplers.append(NormalSamplerWithTrendCycle(mean, stdDev, trend, cycle, step)) - - def registerCustomSampler(self, sampler): - """ - eventsampler - - Parameters - sampler : sampler with sample() method - """ - self.samplers.append(sampler) - - def registerEventSampler(self, intvSampler, valSampler=None): - """ - event sampler - - Parameters - intvSampler : interval sampler - valSampler : value sampler - """ - self.samplers.append(EventSampler(intvSampler, valSampler)) - - def registerMetropolitanSampler(self, propStdDev, minv, binWidth, values): - """ - metropolitan sampler - - Parameters - propStdDev : proposal distr std dev - minv : min domain value for target distr - binWidth : bin width - values : target distr values - """ - self.samplers.append(MetropolitanSampler(propStdDev, minv, binWidth, values)) - - def setSampler(self, var, iter, sampler): - """ - set sampler for some variable when iteration reaches certain point - - Parameters - var : sampler index - iter : iteration count - sampler : new sampler - """ - key = (var, iter) - self.replSamplers[key] = sampler - - def registerExtraArgs(self, *args): - """ - extra args - - Parameters - args : extra argument list - """ - self.extraArgs = args - - def replSampler(self, iter): - """ - replace samper for this iteration - - Parameters - iter : iteration number - """ - if len(self.replSamplers) > 0: - for v in range(self.numVars): - key = (v, iter) - if key in self.replSamplers: - sampler = self.replSamplers[key] - self.samplers[v] = sampler - - def run(self): - """ - run simulator - """ - self.sum = None - self.mean = None - self.sd = None - self.numVars = len(self.samplers) - vOut = 0 - - #print(formatAny(self.numIter, "num iterations")) - for i in range(self.numIter): - self.replSampler(i) - args = list() - for s in self.samplers: - arg = s.sample() - if type(arg) is list: - args.extend(arg) - else: - args.append(arg) - - slen = len(args) - if 
self.extraArgs: - args.extend(self.extraArgs) - args.append(self) - args.append(i) - vOut = self.callback(args) - self.output.append(vOut) - self.prSamples = args[:slen] - - def getOutput(self): - """ - get raw output - """ - return self.output - - def setOutput(self, values): - """ - set raw output - - Parameters - values : output values - """ - self.output = values - self.numIter = len(values) - - def drawHist(self, myTitle, myXlabel, myYlabel): - """ - draw histogram - - Parameters - myTitle : title - myXlabel : label for x - myYlabel : label for y - """ - pyplot.hist(self.output, density=True) - pyplot.title(myTitle) - pyplot.xlabel(myXlabel) - pyplot.ylabel(myYlabel) - pyplot.show() - - def getSum(self): - """ - get sum - """ - if not self.sum: - self.sum = sum(self.output) - return self.sum - - def getMean(self): - """ - get average - """ - if self.mean is None: - self.mean = statistics.mean(self.output) - return self.mean - - def getStdDev(self): - """ - get std dev - """ - if self.sd is None: - self.sd = statistics.stdev(self.output, xbar=self.mean) if self.mean else statistics.stdev(self.output) - return self.sd - - - def getMedian(self): - """ - get median - """ - med = statistics.median(self.output) - return med - - def getMax(self): - """ - get max - """ - return max(self.output) - - def getMin(self): - """ - get min - """ - return min(self.output) - - def getIntegral(self, bounds): - """ - integral - - Parameters - bounds : bound on sum - """ - if not self.sum: - self.sum = sum(self.output) - return self.sum * bounds / self.numIter - - def getLowerTailStat(self, zvalue, numIntPoints=50): - """ - get lower tail stat - - Parameters - zvalue : zscore upper bound - numIntPoints : no of interpolation point for cum distribution - """ - mean = self.getMean() - sd = self.getStdDev() - tailStart = self.getMin() - tailEnd = mean - zvalue * sd - cvaCounts = self.cumDistr(tailStart, tailEnd, numIntPoints) - - reqConf = floatRange(0.0, 0.150, .01) - msg = "p value outside interpolation range, reduce zvalue and try again {:.5f} {:.5f}".format(reqConf[-1], cvaCounts[-1][1]) - assert reqConf[-1] < cvaCounts[-1][1], msg - critValues = self.interpolateCritValues(reqConf, cvaCounts, True, tailStart, tailEnd) - return critValues - - def getPercentile(self, cvalue): - """ - percentile - - Parameters - cvalue : value for percentile - """ - count = 0 - for v in self.output: - if v < cvalue: - count += 1 - percent = int(count * 100.0 / self.numIter) - return percent - - - def getCritValue(self, pvalue): - """ - critical value for probability threshold - - Parameters - pvalue : pvalue - """ - assertWithinRange(pvalue, 0.0, 1.0, "invalid probability value") - svalues = sorted(self.output) - ppval = None - cpval = None - intv = 1.0 / (self.numIter - 1) - for i in range(self.numIter - 1): - cpval = (i + 1) / self.numIter - if cpval > pvalue: - sl = svalues[i] - svalues[i-1] - cval = svalues[i-1] + sl * (pvalue - ppval) - break - ppval = cpval - return cval - - - def getUpperTailStat(self, zvalue, numIntPoints=50): - """ - upper tail stat - - Parameters - zvalue : zscore upper bound - numIntPoints : no of interpolation point for cum distribution - """ - mean = self.getMean() - sd = self.getStdDev() - tailStart = mean + zvalue * sd - tailEnd = self.getMax() - cvaCounts = self.cumDistr(tailStart, tailEnd, numIntPoints) - - reqConf = floatRange(0.85, 1.0, .01) - msg = "p value outside interpolation range, reduce zvalue and try again {:.5f} {:.5f}".format(reqConf[0], cvaCounts[0][1]) - assert reqConf[0] > 
cvaCounts[0][1], msg - critValues = self.interpolateCritValues(reqConf, cvaCounts, False, tailStart, tailEnd) - return critValues - - def cumDistr(self, tailStart, tailEnd, numIntPoints): - """ - cumulative distribution at tail - - Parameters - tailStart : tail start - tailEnd : tail end - numIntPoints : no of interpolation points - """ - delta = (tailEnd - tailStart) / numIntPoints - cvalues = floatRange(tailStart, tailEnd, delta) - cvaCounts = list() - for cv in cvalues: - count = 0 - for v in self.output: - if v < cv: - count += 1 - p = (cv, count/self.numIter) - if self.logger is not None: - self.logger.info("{:.3f} {:.3f}".format(p[0], p[1])) - cvaCounts.append(p) - return cvaCounts - - def interpolateCritValues(self, reqConf, cvaCounts, lowertTail, tailStart, tailEnd): - """ - interpolate for spefici confidence limits - - Parameters - reqConf : confidence level values - cvaCounts : cum values - lowertTail : True if lower tail - tailStart ; tail start - tailEnd : tail end - """ - critValues = list() - if self.logger is not None: - self.logger.info("target conf limit " + str(reqConf)) - reqConfSub = reqConf[1:] if lowertTail else reqConf[:-1] - for rc in reqConfSub: - for i in range(len(cvaCounts) -1): - if rc >= cvaCounts[i][1] and rc < cvaCounts[i+1][1]: - #print("interpoltate between " + str(cvaCounts[i]) + " and " + str(cvaCounts[i+1])) - slope = (cvaCounts[i+1][0] - cvaCounts[i][0]) / (cvaCounts[i+1][1] - cvaCounts[i][1]) - cval = cvaCounts[i][0] + slope * (rc - cvaCounts[i][1]) - p = (rc, cval) - if self.logger is not None: - self.logger.debug("interpolated crit values {:.3f} {:.3f}".format(p[0], p[1])) - critValues.append(p) - break - if lowertTail: - p = (0.0, tailStart) - critValues.insert(0, p) - else: - p = (1.0, tailEnd) - critValues.append(p) - return critValues diff --git a/spaces/ThomasSimonini/Murder-on-horsea-island-prototype/app.py b/spaces/ThomasSimonini/Murder-on-horsea-island-prototype/app.py deleted file mode 100644 index 969881a81c56583031933e0db5f58dc485e41e89..0000000000000000000000000000000000000000 --- a/spaces/ThomasSimonini/Murder-on-horsea-island-prototype/app.py +++ /dev/null @@ -1,120 +0,0 @@ -import gradio as gr -import random -import requests - -import numpy as np - -import pandas as pd - - -# Template -title = "Murder on Horsea Island Prototype with Sentence Similarity (Paraphrase XLM-R multilingual)🔪 (WORK IN PROGRESS)" -description = "Prototype of the Unity Game (to test the questions)." 
-article = """ -""" -theme="huggingface" - -# examples = - -# API -SS_API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/paraphrase-xlm-r-multilingual-v1" - - - -# Build the 3 different questions array before starting -def build_initial_questions_and_answers(): - # Eleanor - eleanor_df = pd.read_csv("eleanor.csv", delimiter=",") - eleanor_len = eleanor_df.shape[0] - eleanor_questions = [eleanor_df["Questions"][i] for i in range(eleanor_len)] - eleanor_answers = [eleanor_df["Answers"][i] for i in range(eleanor_len)] - - # Tom - tom_df = pd.read_csv("tom.csv", delimiter=",") - tom_len = tom_df.shape[0] - tom_questions = [tom_df["Questions"][i] for i in range(tom_len)] - tom_answers = [tom_df["Answers"][i] for i in range(tom_len)] - - # Charles - charles_df = pd.read_csv("charles.csv", delimiter=",") - charles_len = charles_df.shape[0] - charles_questions = [charles_df["Questions"][i] for i in range(charles_len)] - charles_answers = [charles_df["Answers"][i] for i in range(charles_len)] - - return eleanor_questions, eleanor_answers, tom_questions, tom_answers, charles_questions, charles_answers - - - -def build_json(message, questions): - json = { - "inputs": { - "source_sentence": message, - "sentences": questions - }, - } - return json - - -def query(payload): - response = requests.post(SS_API_URL, json=payload) - return response.json() - -def answer(output_json, character): - # First we handle output_json - idx = np.argmax(output_json) - - if (character == "eleanor"): - answer_ = eleanor_answers[idx] - - elif (character == "tom"): - answer_ = tom_answers[idx] - - else: - answer_ = charles_answers[idx] - - return answer_ - - -def chat(message, character): - history = gr.get_state() or [] - - if (character == "eleanor"): - json = build_json(message, eleanor_questions) - - elif (character == "tom"): - json = build_json(message, tom_questions) - - else: - json = build_json(message, charles_questions) - - output = query(json) - - answer_ = answer(output, character) - - - history.append((message, answer_)) - gr.set_state(history) - html = "<div class='chatbox'>" - for user_msg, resp_msg in history: - html += f"<div class='user_msg'>{user_msg}</div>" - html += f"<div class='resp_msg'>{resp_msg}</div>" - html += "</div>" - return html - -eleanor_questions, eleanor_answers, tom_questions, tom_answers, charles_questions, charles_answers = build_initial_questions_and_answers() - -choices = ["Eleanor", "Tom", "Charles (The Butler)"] -character = gr.inputs.Radio(choices, type="value", default=None, label=None) - -iface = gr.Interface(chat, ["text", character], "html", css=""" - .chatbox {display:flex;flex-direction:column} - .user_msg, .resp_msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%} - .user_msg {background-color:cornflowerblue;color:white;align-self:start} - .resp_msg {background-color:lightgray;align-self:self-end} -""", allow_screenshot=False, allow_flagging=False) - -iface.launch() - -if __name__ == "__main__": - - iface.launch() \ No newline at end of file diff --git a/spaces/Tonic/QuranInUrdu/app.py b/spaces/Tonic/QuranInUrdu/app.py deleted file mode 100644 index 3759a4d07770a6a637b64c05ff99668c555576e6..0000000000000000000000000000000000000000 --- a/spaces/Tonic/QuranInUrdu/app.py +++ /dev/null @@ -1,94 +0,0 @@ -from typing import List -import typing -from aiser import RestAiServer, KnowledgeBase, SemanticSearchResult, Agent -from aiser.models import ChatMessage -import asyncio -import gradio as gr -import requests -import os -import logging - -# Configure logging -logging.basicConfig(level=logging.DEBUG) - -# Define 
environment variables -API_URL = os.getenv("API_URL", "http://ec2-54-166-81-166.compute-1.amazonaws.com:3000/api/v1/prediction/117a5076-c05e-4208-91d9-d0e772bf981e") -API_TOKEN = os.getenv("API_TOKEN", "Bearer 0Ouk5cgljCYuuF3LDfBkIAcuqj9hgWaaK5qRCLfbfrg=") - -class ChatBot: - def __init__(self): - self.history = [] - - def predict(self, input): - new_user_input = input # User input should be converted into model input format - - # Prepare payload for API call - payload = {"question": new_user_input} - - # Make an external API call - headers = {"Authorization": API_TOKEN} - response = requests.post(API_URL, headers=headers, json=payload) - - # Initialize the response text with an error message by default - response_text = f"API call failed with status code {response.status_code}" - - if response.status_code == 200: - response_text = response.text # Get the raw text response - - # Process the API response and update history - self.history.append(response_text) - - # Log API request and response - logging.debug(f"API Request: {API_URL}, Payload: {payload}, Headers: {headers}") - logging.debug(f"API Response: {response.status_code}, Content: {response_text}") - - # Return the response text - return response_text - -bot = ChatBot() - -title = "👋🏻النور اسلام میں خوش آمدید🌠" -description = "یہاں آپ اسلام یا اپنی زندگی کے بارے میں سوالات پوچھ سکتے ہیں:" -examples = ["محنت کے بارے میں قرآن کیا کہتا ہے؟"] - -iface = gr.Interface( - fn=bot.predict, - title=title, - description=description, - examples=examples, - inputs="text", - outputs="text") - -iface.launch() - -# Placeholder classes, replace with actual implementations -class KnowledgeBaseExample(KnowledgeBase): - def perform_semantic_search(self, query_text: str, desired_number_of_results: int) -> List[SemanticSearchResult]: - result_example = SemanticSearchResult( - content="This is an example of a semantic search result", - score=0.5, - ) - return [result_example for _ in range(desired_number_of_results)] - -class AgentExample(Agent): - async def reply(self, messages: typing.List[ChatMessage]) -> typing.AsyncGenerator[ChatMessage, None]: - reply_message = "This is an example of a reply from an agent" - for character in reply_message: - yield ChatMessage(text_content=character) - await asyncio.sleep(0.1) - -if __name__ == '__main__': - server = RestAiServer( - agents=[ - AgentExample( - agent_id='10209b93-2dd0-47a0-8eb2-33fb018a783b' # replace with your agent id - ), - ], - knowledge_bases=[ - KnowledgeBaseExample( - knowledge_base_id='85bc1c72-b8e0-4042-abcf-8eb2d478f207' # replace with your knowledge base id - ), - ], - port=5000 - ) - server.run() \ No newline at end of file diff --git a/spaces/Woocy/541GPT/chatgpt - macOS.command b/spaces/Woocy/541GPT/chatgpt - macOS.command deleted file mode 100644 index fa015edca9e6916f24394813ce8ba77d2072e296..0000000000000000000000000000000000000000 --- a/spaces/Woocy/541GPT/chatgpt - macOS.command +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -echo Opening ChuanhuChatGPT... -cd "$(dirname "${BASH_SOURCE[0]}")" -nohup python3 ChuanhuChatbot.py >/dev/null 2>&1 & -sleep 5 -open http://127.0.0.1:7860 -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). If you kill ChuanhuChatbot, Use "pkill -f 'ChuanhuChatbot'" command in terminal. 
\ No newline at end of file diff --git a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/models.py b/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/models.py deleted file mode 100644 index 7dcd22edf811b952514080f5f06cc43d635ead28..0000000000000000000000000000000000000000 --- a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/models.py +++ /dev/null @@ -1,542 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, 
x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emotion_embedding = emotion_embedding - - if self.n_vocab!=0: - self.emb = nn.Embedding(n_vocab, hidden_channels) - if emotion_embedding: - self.emotion_emb = nn.Linear(1024, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, emotion_embedding=None): - if self.n_vocab!=0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - if emotion_embedding is not None: - x = x + self.emotion_emb(emotion_embedding.unsqueeze(1)) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in 
self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - 
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - emotion_embedding=False, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - 
- self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, emotion_embedding=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 
2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/XzJosh/nine2-Bert-VITS2/server.py b/spaces/XzJosh/nine2-Bert-VITS2/server.py deleted file mode 100644 index c736ca4f95fec853950eef6654ef79856beffc0a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/nine2-Bert-VITS2/server.py +++ /dev/null @@ -1,123 +0,0 @@ -from flask import Flask, request, Response -from io import BytesIO -import torch -from av import open as avopen - -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -from scipy.io import wavfile - -# Flask Init -app = Flask(__name__) -app.config['JSON_AS_ASCII'] = False -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - print([f"{p}{t}" for p, t in zip(phone, tone)]) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - - assert bert.shape[-1] == len(phone) - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - - return bert, phone, tone, language - -def infer(text, sdp_ratio, noise_scale, noise_scale_w,length_scale,sid): - bert, phones, tones, lang_ids = get_text(text,"ZH", hps,) - with torch.no_grad(): - x_tst=phones.to(dev).unsqueeze(0) - tones=tones.to(dev).unsqueeze(0) - lang_ids=lang_ids.to(dev).unsqueeze(0) - bert = bert.to(dev).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(dev) - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(dev) - audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids,bert, sdp_ratio=sdp_ratio - , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() - return audio - -def replace_punctuation(text, i=2): - punctuation = ",。?!" 
- for char in punctuation: - text = text.replace(char, char * i) - return text - -def wav2(i, o, format): - inp = avopen(i, 'rb') - out = avopen(o, 'wb', format=format) - if format == "ogg": format = "libvorbis" - - ostream = out.add_stream(format) - - for frame in inp.decode(audio=0): - for p in ostream.encode(frame): out.mux(p) - - for p in ostream.encode(None): out.mux(p) - - out.close() - inp.close() - -# Load Generator -hps = utils.get_hparams_from_file("./configs/config.json") - -dev='cuda' -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).to(dev) -_ = net_g.eval() - -_ = utils.load_checkpoint("logs/G_649000.pth", net_g, None,skip_optimizer=True) - -@app.route("/",methods=['GET','POST']) -def main(): - if request.method == 'GET': - try: - speaker = request.args.get('speaker') - text = request.args.get('text').replace("/n","") - sdp_ratio = float(request.args.get("sdp_ratio", 0.2)) - noise = float(request.args.get("noise", 0.5)) - noisew = float(request.args.get("noisew", 0.6)) - length = float(request.args.get("length", 1.2)) - if length >= 2: - return "Too big length" - if len(text) >=200: - return "Too long text" - fmt = request.args.get("format", "wav") - if None in (speaker, text): - return "Missing Parameter" - if fmt not in ("mp3", "wav", "ogg"): - return "Invalid Format" - except: - return "Invalid Parameter" - - with torch.no_grad(): - audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise, noise_scale_w=noisew, length_scale=length, sid=speaker) - - with BytesIO() as wav: - wavfile.write(wav, hps.data.sampling_rate, audio) - torch.cuda.empty_cache() - if fmt == "wav": - return Response(wav.getvalue(), mimetype="audio/wav") - wav.seek(0, 0) - with BytesIO() as ofp: - wav2(wav, ofp, fmt) - return Response( - ofp.getvalue(), - mimetype="audio/mpeg" if fmt == "mp3" else "audio/ogg" - ) diff --git a/spaces/YlcldKlns/bing/src/lib/bots/bing/index.ts b/spaces/YlcldKlns/bing/src/lib/bots/bing/index.ts deleted file mode 100644 index c75c69f94af8c3db92d4c90d465c219a2af72a4d..0000000000000000000000000000000000000000 --- a/spaces/YlcldKlns/bing/src/lib/bots/bing/index.ts +++ /dev/null @@ -1,432 +0,0 @@ -import { fetch, WebSocket, debug } from '@/lib/isomorphic' -import WebSocketAsPromised from 'websocket-as-promised' -import { - SendMessageParams, - BingConversationStyle, - ConversationResponse, - ChatResponseMessage, - ConversationInfo, - InvocationEventType, - ChatError, - ErrorCode, - ChatUpdateCompleteResponse, - ImageInfo, - KBlobResponse -} from './types' - -import { convertMessageToMarkdown, websocketUtils, streamAsyncIterable } from './utils' -import { WatchDog, createChunkDecoder } from '@/lib/utils' - -type Params = SendMessageParams<{ bingConversationStyle: BingConversationStyle }> - -const OPTIONS_SETS = [ - 'nlu_direct_response_filter', - 'deepleo', - 'disable_emoji_spoken_text', - 'responsible_ai_policy_235', - 'enablemm', - 'iycapbing', - 'iyxapbing', - 'objopinion', - 'rweasgv2', - 'dagslnv1', - 'dv3sugg', - 'autosave', - 'iyoloxap', - 'iyoloneutral', - 'clgalileo', - 'gencontentv3', -] - -export class BingWebBot { - protected conversationContext?: ConversationInfo - protected cookie: string - protected ua: string - protected endpoint = '' - private lastText = '' - private asyncTasks: Array> = [] - - constructor(opts: { - cookie: string - ua: string - bingConversationStyle?: BingConversationStyle - conversationContext?: ConversationInfo - }) { - 
const { cookie, ua, conversationContext } = opts - this.cookie = cookie?.includes(';') ? cookie : `_EDGE_V=1; _U=${cookie}` - this.ua = ua - this.conversationContext = conversationContext - } - - static buildChatRequest(conversation: ConversationInfo) { - const optionsSets = OPTIONS_SETS - if (conversation.conversationStyle === BingConversationStyle.Precise) { - optionsSets.push('h3precise') - } else if (conversation.conversationStyle === BingConversationStyle.Creative) { - optionsSets.push('h3imaginative') - } - return { - arguments: [ - { - source: 'cib', - optionsSets, - allowedMessageTypes: [ - 'ActionRequest', - 'Chat', - 'Context', - 'InternalSearchQuery', - 'InternalSearchResult', - 'Disengaged', - 'InternalLoaderMessage', - 'Progress', - 'RenderCardRequest', - 'SemanticSerp', - 'GenerateContentQuery', - 'SearchQuery', - ], - sliceIds: [ - 'winmuid1tf', - 'anssupfor_c', - 'imgchatgptv2', - 'tts2cf', - 'contansperf', - 'mlchatpc8500w', - 'mlchatpc2', - 'ctrlworkpay', - 'winshortmsgtf', - 'cibctrl', - 'sydtransctrl', - 'sydconfigoptc', - '0705trt4', - '517opinion', - '628ajcopus0', - '330uaugs0', - '529rwea', - '0626snptrcs0', - '424dagslnv1', - ], - isStartOfSession: conversation.invocationId === 0, - message: { - author: 'user', - inputMethod: 'Keyboard', - text: conversation.prompt, - imageUrl: conversation.imageUrl, - messageType: 'Chat', - }, - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - participant: { id: conversation.clientId }, - }, - ], - invocationId: conversation.invocationId.toString(), - target: 'chat', - type: InvocationEventType.StreamInvocation, - } - } - - async createConversation(): Promise { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - - let resp: ConversationResponse | undefined - try { - const response = await fetch(this.endpoint + '/api/create', { method: 'POST', headers, redirect: 'error', mode: 'cors', credentials: 'include' }) - if (response.status === 404) { - throw new ChatError('Not Found', ErrorCode.NOTFOUND_ERROR) - } - resp = await response.json() as ConversationResponse - } catch (err) { - console.error('create conversation error', err) - } - - if (!resp?.result) { - throw new ChatError('你的 VPS 或代理可能被封禁,如有疑问,请前往 https://github.com/weaigc/bingo 咨询', ErrorCode.BING_IP_FORBIDDEN) - } - - const { value, message } = resp.result || {} - if (value !== 'Success') { - const errorMsg = `${value}: ${message}` - if (value === 'UnauthorizedRequest') { - if (/fetch failed/i.test(message || '')) { - throw new ChatError(errorMsg, ErrorCode.BING_IP_FORBIDDEN) - } - throw new ChatError(errorMsg, ErrorCode.BING_UNAUTHORIZED) - } - if (value === 'TryLater') { - throw new ChatError(errorMsg, ErrorCode.BING_TRY_LATER) - } - if (value === 'Forbidden') { - throw new ChatError(errorMsg, ErrorCode.BING_FORBIDDEN) - } - throw new ChatError(errorMsg, ErrorCode.UNKOWN_ERROR) - } - return resp - } - - private async createContext(conversationStyle: BingConversationStyle) { - if (!this.conversationContext) { - const conversation = await this.createConversation() - this.conversationContext = { - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - clientId: conversation.clientId, - invocationId: 0, - conversationStyle, - prompt: '', - } - } - return this.conversationContext - } - - async 
sendMessage(params: Params) { - try { - await this.createContext(params.options.bingConversationStyle) - Object.assign(this.conversationContext!, { prompt: params.prompt, imageUrl: params.imageUrl }) - return this.sydneyProxy(params) - } catch (error) { - params.onEvent({ - type: 'ERROR', - error: error instanceof ChatError ? error : new ChatError('Catch Error', ErrorCode.UNKOWN_ERROR), - }) - } - } - - private async sydneyProxy(params: Params) { - const abortController = new AbortController() - const response = await fetch(this.endpoint + '/api/sydney', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - signal: abortController.signal, - body: JSON.stringify(this.conversationContext!) - }) - if (response.status !== 200) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Unknown error', - ErrorCode.UNKOWN_ERROR, - ), - }) - } - params.signal?.addEventListener('abort', () => { - abortController.abort() - }) - - const textDecoder = createChunkDecoder() - for await (const chunk of streamAsyncIterable(response.body!)) { - this.parseEvents(params, websocketUtils.unpackMessage(textDecoder(chunk))) - } - } - - async sendWs() { - const wsConfig: ConstructorParameters[1] = { - packMessage: websocketUtils.packMessage, - unpackMessage: websocketUtils.unpackMessage, - createWebSocket: (url) => new WebSocket(url, { - headers: { - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'User-Agent': this.ua, - pragma: 'no-cache', - cookie: this.cookie, - } - }) - } - const wsp = new WebSocketAsPromised('wss://sydney.bing.com/sydney/ChatHub', wsConfig) - - wsp.open().then(() => { - wsp.sendPacked({ protocol: 'json', version: 1 }) - wsp.sendPacked({ type: 6 }) - wsp.sendPacked(BingWebBot.buildChatRequest(this.conversationContext!)) - }) - - return wsp - } - - private async useWs(params: Params) { - const wsp = await this.sendWs() - const watchDog = new WatchDog() - wsp.onUnpackedMessage.addListener((events) => { - watchDog.watch(() => { - wsp.sendPacked({ type: 6 }) - }) - this.parseEvents(params, events) - }) - - wsp.onClose.addListener(() => { - watchDog.reset() - params.onEvent({ type: 'DONE' }) - wsp.removeAllListeners() - }) - - params.signal?.addEventListener('abort', () => { - wsp.removeAllListeners() - wsp.close() - }) - } - - private async createImage(prompt: string, id: string) { - try { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - const query = new URLSearchParams({ - prompt, - id - }) - const response = await fetch(this.endpoint + '/api/image?' 
+ query.toString(), - { - method: 'POST', - headers, - mode: 'cors', - credentials: 'include' - }) - .then(res => res.text()) - if (response) { - this.lastText += '\n' + response - } - } catch (err) { - console.error('Create Image Error', err) - } - } - - private buildKnowledgeApiPayload(imageUrl: string, conversationStyle: BingConversationStyle) { - const imageInfo: ImageInfo = {} - let imageBase64: string | undefined = undefined - const knowledgeRequest = { - imageInfo, - knowledgeRequest: { - invokedSkills: [ - 'ImageById' - ], - subscriptionId: 'Bing.Chat.Multimodal', - invokedSkillsRequestData: { - enableFaceBlur: true - }, - convoData: { - convoid: this.conversationContext?.conversationId, - convotone: conversationStyle, - } - }, - } - - if (imageUrl.startsWith('data:image/')) { - imageBase64 = imageUrl.replace('data:image/', ''); - const partIndex = imageBase64.indexOf(',') - if (partIndex) { - imageBase64 = imageBase64.substring(partIndex + 1) - } - } else { - imageInfo.url = imageUrl - } - return { knowledgeRequest, imageBase64 } - } - - async uploadImage(imageUrl: string, conversationStyle: BingConversationStyle = BingConversationStyle.Creative): Promise { - if (!imageUrl) { - return - } - await this.createContext(conversationStyle) - const payload = this.buildKnowledgeApiPayload(imageUrl, conversationStyle) - - const response = await fetch(this.endpoint + '/api/kblob', - { - headers: { - 'Content-Type': 'application/json', - }, - method: 'POST', - mode: 'cors', - credentials: 'include', - body: JSON.stringify(payload), - }) - .then(res => res.json()) - .catch(e => { - console.log('Error', e) - }) - return response - } - - private async generateContent(message: ChatResponseMessage) { - if (message.contentType === 'IMAGE') { - this.asyncTasks.push(this.createImage(message.text, message.messageId)) - } - } - - private async parseEvents(params: Params, events: any) { - const conversation = this.conversationContext! - - events?.forEach(async (event: ChatUpdateCompleteResponse) => { - debug('bing event', event) - if (event.type === 3) { - await Promise.all(this.asyncTasks) - this.asyncTasks = [] - params.onEvent({ type: 'UPDATE_ANSWER', data: { text: this.lastText } }) - params.onEvent({ type: 'DONE' }) - conversation.invocationId = parseInt(event.invocationId, 10) + 1 - } else if (event.type === 1) { - const messages = event.arguments[0].messages - if (messages) { - const text = convertMessageToMarkdown(messages[0]) - this.lastText = text - params.onEvent({ type: 'UPDATE_ANSWER', data: { text, spokenText: messages[0].text, throttling: event.arguments[0].throttling } }) - } - } else if (event.type === 2) { - const messages = event.item.messages as ChatResponseMessage[] | undefined - if (!messages) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - event.item.result.error || 'Unknown error', - event.item.result.value === 'Throttled' ? ErrorCode.THROTTLE_LIMIT - : event.item.result.value === 'CaptchaChallenge' ? (this.conversationContext?.conversationId?.includes('BingProdUnAuthenticatedUsers') ? 
ErrorCode.BING_UNAUTHORIZED : ErrorCode.BING_CAPTCHA) - : ErrorCode.UNKOWN_ERROR - ), - }) - return - } - const limited = messages.some((message) => - message.contentOrigin === 'TurnLimiter' - || message.messageType === 'Disengaged' - ) - if (limited) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Sorry, you have reached chat limit in this conversation.', - ErrorCode.CONVERSATION_LIMIT, - ), - }) - return - } - - const lastMessage = event.item.messages.at(-1) as ChatResponseMessage - const specialMessage = event.item.messages.find(message => message.author === 'bot' && message.contentType === 'IMAGE') - if (specialMessage) { - this.generateContent(specialMessage) - } - - if (lastMessage) { - const text = convertMessageToMarkdown(lastMessage) - this.lastText = text - params.onEvent({ - type: 'UPDATE_ANSWER', - data: { text, throttling: event.item.throttling, suggestedResponses: lastMessage.suggestedResponses, sourceAttributions: lastMessage.sourceAttributions }, - }) - } - } - }) - } - - resetConversation() { - this.conversationContext = undefined - } -} diff --git a/spaces/YotamNitzan/domain-expansion/generate_aligned.py b/spaces/YotamNitzan/domain-expansion/generate_aligned.py deleted file mode 100644 index 40382e45358c98be0c0763ff4a339f2c800e9aba..0000000000000000000000000000000000000000 --- a/spaces/YotamNitzan/domain-expansion/generate_aligned.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2023 Adobe Research. All rights reserved. -# To view a copy of the license, visit LICENSE.md. - -import sys - -sys.path.append('..') - -import argparse -from pathlib import Path - -import torch -import torch.nn.functional as F -import torchvision.transforms as T - -import dnnlib -import legacy -from expansion_utils import io_utils, latent_operations - -def generate_images( - ckpt, - num_samples, - truncation_psi -): - device = torch.device('cuda') - with dnnlib.util.open_url(ckpt) as f: - snapshot_dict = legacy.load_network_pkl(f) - G = snapshot_dict['G_ema'].to(device) - latent_basis = snapshot_dict['latent_basis'].to(device) - subspace_distance = snapshot_dict['subspace_distance'] - repurposed_dims = snapshot_dict['repurposed_dims'].cpu() - - # out_dir = Path(out_dir) - - def norm_fn(tensor): - minFrom= tensor.min() - maxFrom= tensor.max() - minTo = 0 - maxTo=1 - return minTo + (maxTo - minTo) * ((tensor - minFrom) / (maxFrom - minFrom)) - topil = T.ToPILImage(mode='RGB') - - # norm_fn = T.Normalize( - # # mean=[0.485, 0.456, 0.406], - # # std=[0.229, 0.224, 0.225] - # mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], - # std=[1/0.229, 1/0.224, 1/0.225] - # ) - - all_imgs = [] - for i in range(num_samples): - per_sample_imgs = [] - z = torch.randn((1, G.z_dim), ).to(device) - w = G.mapping(z, None, truncation_psi=truncation_psi) - - base_w, edit_ws = latent_operations.project_to_subspaces(w, latent_basis, repurposed_dims, step_size=subspace_distance, mean=G.mapping.w_avg) - edit_ws = edit_ws[0] # Single step - base_img = G.synthesis(base_w, noise_mode='const') - per_sample_imgs.append(topil(norm_fn(base_img.squeeze()))) - # io_utils.save_images(base_img, out_dir.joinpath('base', f'{i:05d}')) - - for idx, (dim_num, edit_w) in enumerate(zip(repurposed_dims, edit_ws)): - # dim_out_dir = out_dir.joinpath(f'dim_{dim_num}') - if idx % 4 == 0: - edit_img = G.synthesis(edit_w, noise_mode='const') - # mean, std = edit_img.mean((0,2)), edit_img.std((0,2)) - # norm_fn = T.Normalize(mean, std) - - edited_img_pil = topil( - norm_fn( - edit_img.squeeze(), - ) - ) - 
per_sample_imgs.append(edited_img_pil) - # io_utils.save_images(edit_img, dim_out_dir.joinpath(f'{i:05d}')) - - all_imgs.append(per_sample_imgs) - - return all_imgs - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument('--ckpt', help='Network pickle filename', required=True) - parser.add_argument('--out_dir', help='Where to save the output images', type=str, required=True, metavar='DIR') - parser.add_argument('--num', help='Number of independant samples', type=int) - parser.add_argument('--truncation_psi', help='Coefficient for truncation', type=float, default=1) - - args = parser.parse_args() - - with torch.no_grad(): - generate_images(args.ckpt, args.out_dir, args.num, args.truncation_psi) diff --git a/spaces/Yunshansongbai/SVC-Nahida/inference_main.py b/spaces/Yunshansongbai/SVC-Nahida/inference_main.py deleted file mode 100644 index f612b919444701bd94ab8d70e29058cf90769994..0000000000000000000000000000000000000000 --- a/spaces/Yunshansongbai/SVC-Nahida/inference_main.py +++ /dev/null @@ -1,108 +0,0 @@ -import io -import logging -import time -from pathlib import Path - -import librosa -import matplotlib.pyplot as plt -import numpy as np -import soundfile - -from inference import infer_tool -from inference import slicer -from inference.infer_tool import Svc - -logging.getLogger('numba').setLevel(logging.WARNING) -chunks_dict = infer_tool.read_temp("inference/chunks_temp.json") - -# 这里是推理用到的所有参数,从这里修改参数即可 -模型路径:str = "./logs/44k/G_10000.pdparams" # 模型路径 -推理文件列表:list = ["1.wav"] # wav文件名列表,放在raw文件夹下 -音高调整:list = [0] # 音高调整,支持正负(半音) -合成目标说话人名称:list = ['yuuka'] # 合成目标说话人名称 -自动预测音高:bool = False # 语音转换自动预测音高,转换歌声时不要打开这个会严重跑调 -聚类模型路径:str = "logs/44k/kmeans_10000.pdparams" # 聚类模型路径,如果没有训练聚类则随便填 -聚类方案占比:float = 0 # 聚类方案占比,范围0-1,若没有训练聚类模型则填0即可 -静音分贝:int = -40 # 静音分贝阈值,默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50 -推理设备:str or None = None # 推理设备,None则为自动选择cpu和gpu -音频输出格式:str = 'flac' # 音频输出格式 -噪音比例:float = 0.4 # 声音有点电的话可以尝试调高这个,但是会降低音质,较为玄学 - -def main(): - import argparse - - parser = argparse.ArgumentParser(description='飞桨sovits4 推理模块') - parser.add_argument('-m', '--model_path', type=str, default=模型路径, help='模型路径') - parser.add_argument('-c', '--config_path', type=str, default="./logs/44k/config.json", help='配置文件路径') - parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=推理文件列表, help='wav文件名列表,放在raw文件夹下') - parser.add_argument('-t', '--trans', type=int, nargs='+', default=音高调整, help='音高调整,支持正负(半音)') - parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=合成目标说话人名称, help='合成目标说话人名称') - - parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=自动预测音高, - help='语音转换自动预测音高,转换歌声时不要打开这个会严重跑调') - parser.add_argument('-cm', '--cluster_model_path', type=str, default=聚类模型路径, help='聚类模型路径,如果没有训练聚类则随便填') - parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=聚类方案占比, help='聚类方案占比,范围0-1,若没有训练聚类模型则填0即可') - - parser.add_argument('-sd', '--slice_db', type=int, default=静音分贝, help='默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50') - parser.add_argument('-d', '--device', type=str, default=推理设备, help='推理设备,None则为自动选择cpu和gpu') - parser.add_argument('-ns', '--noice_scale', type=float, default=噪音比例, help='噪音级别,会影响咬字和音质,较为玄学') - parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='推理音频pad秒数,由于未知原因开头结尾会有异响,pad一小段静音段后就不会出现') - parser.add_argument('-wf', '--wav_format', type=str, default=音频输出格式, help='音频输出格式') - - args = parser.parse_args() - - svc_model = Svc(args.model_path, args.config_path, args.device, 
args.cluster_model_path) - infer_tool.mkdir(["raw", "results"]) - clean_names = args.clean_names - trans = args.trans - spk_list = args.spk_list - slice_db = args.slice_db - wav_format = args.wav_format - auto_predict_f0 = args.auto_predict_f0 - cluster_infer_ratio = args.cluster_infer_ratio - noice_scale = args.noice_scale - pad_seconds = args.pad_seconds - - infer_tool.fill_a_to_b(trans, clean_names) - for clean_name, tran in zip(clean_names, trans): - raw_audio_path = f"raw/{clean_name}" - if "." not in raw_audio_path: - raw_audio_path += ".wav" - infer_tool.format_wav(raw_audio_path) - wav_path = Path(raw_audio_path).with_suffix('.wav') - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - - for spk in spk_list: - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====分段开始,{round(len(data) / audio_sr, 3)}秒======') - - length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample)) - if slice_tag: - print('跳过空段') - _audio = np.zeros(length) - else: - # padd - pad_len = int(audio_sr * pad_seconds) - data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])]) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - raw_path.seek(0) - out_audio, out_sr = svc_model.infer(spk, tran, raw_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale - ) - _audio = out_audio.detach().cpu().numpy() - pad_len = int(svc_model.target_sample * pad_seconds) - _audio = _audio[pad_len:-pad_len] - - audio.extend(list(infer_tool.pad_array(_audio, length))) - key = "auto" if auto_predict_f0 else f"{tran}key" - cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}" - res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}' - soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format) - -if __name__ == '__main__': - main() diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/bricks/upsample.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/bricks/upsample.py deleted file mode 100644 index a1a353767d0ce8518f0d7289bed10dba0178ed12..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/bricks/upsample.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F - -from ..utils import xavier_init -from .registry import UPSAMPLE_LAYERS - -UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample) -UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample) - - -@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle') -class PixelShufflePack(nn.Module): - """Pixel Shuffle upsample layer. - - This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to - achieve a simple upsampling with pixel shuffle. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - scale_factor (int): Upsample ratio. - upsample_kernel (int): Kernel size of the conv layer to expand the - channels. 
- """ - - def __init__(self, in_channels, out_channels, scale_factor, - upsample_kernel): - super(PixelShufflePack, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.scale_factor = scale_factor - self.upsample_kernel = upsample_kernel - self.upsample_conv = nn.Conv2d( - self.in_channels, - self.out_channels * scale_factor * scale_factor, - self.upsample_kernel, - padding=(self.upsample_kernel - 1) // 2) - self.init_weights() - - def init_weights(self): - xavier_init(self.upsample_conv, distribution='uniform') - - def forward(self, x): - x = self.upsample_conv(x) - x = F.pixel_shuffle(x, self.scale_factor) - return x - - -def build_upsample_layer(cfg, *args, **kwargs): - """Build upsample layer. - - Args: - cfg (dict): The upsample layer config, which should contain: - - - type (str): Layer type. - - scale_factor (int): Upsample ratio, which is not applicable to - deconv. - - layer args: Args needed to instantiate a upsample layer. - args (argument list): Arguments passed to the ``__init__`` - method of the corresponding conv layer. - kwargs (keyword arguments): Keyword arguments passed to the - ``__init__`` method of the corresponding conv layer. - - Returns: - nn.Module: Created upsample layer. - """ - if not isinstance(cfg, dict): - raise TypeError(f'cfg must be a dict, but got {type(cfg)}') - if 'type' not in cfg: - raise KeyError( - f'the cfg dict must contain the key "type", but got {cfg}') - cfg_ = cfg.copy() - - layer_type = cfg_.pop('type') - if layer_type not in UPSAMPLE_LAYERS: - raise KeyError(f'Unrecognized upsample type {layer_type}') - else: - upsample = UPSAMPLE_LAYERS.get(layer_type) - - if upsample is nn.Upsample: - cfg_['mode'] = layer_type - layer = upsample(*args, **kwargs, **cfg_) - return layer diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/corner_pool.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/corner_pool.py deleted file mode 100644 index a33d798b43d405e4c86bee4cd6389be21ca9c637..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/corner_pool.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
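# --- Editorial sketch (not part of either original file) --------------------
# Minimal usage of the `build_upsample_layer` helper defined in upsample.py
# just above. It assumes a standard mmcv installation exposing the same API;
# the channel counts and input shape are illustrative only.
import torch
from mmcv.cnn import build_upsample_layer

cfg = dict(type='pixel_shuffle', in_channels=64, out_channels=64,
           scale_factor=2, upsample_kernel=3)
up = build_upsample_layer(cfg)      # -> PixelShufflePack instance
x = torch.randn(1, 64, 32, 32)
y = up(x)                           # conv + F.pixel_shuffle: (1, 64, 64, 64)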
-import torch -from torch import nn -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', [ - 'top_pool_forward', 'top_pool_backward', 'bottom_pool_forward', - 'bottom_pool_backward', 'left_pool_forward', 'left_pool_backward', - 'right_pool_forward', 'right_pool_backward' -]) - -_mode_dict = {'top': 0, 'bottom': 1, 'left': 2, 'right': 3} - - -class TopPoolFunction(Function): - - @staticmethod - def symbolic(g, input): - output = g.op( - 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['top'])) - return output - - @staticmethod - def forward(ctx, input): - output = ext_module.top_pool_forward(input) - ctx.save_for_backward(input) - return output - - @staticmethod - def backward(ctx, grad_output): - input, = ctx.saved_tensors - output = ext_module.top_pool_backward(input, grad_output) - return output - - -class BottomPoolFunction(Function): - - @staticmethod - def symbolic(g, input): - output = g.op( - 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['bottom'])) - return output - - @staticmethod - def forward(ctx, input): - output = ext_module.bottom_pool_forward(input) - ctx.save_for_backward(input) - return output - - @staticmethod - def backward(ctx, grad_output): - input, = ctx.saved_tensors - output = ext_module.bottom_pool_backward(input, grad_output) - return output - - -class LeftPoolFunction(Function): - - @staticmethod - def symbolic(g, input): - output = g.op( - 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['left'])) - return output - - @staticmethod - def forward(ctx, input): - output = ext_module.left_pool_forward(input) - ctx.save_for_backward(input) - return output - - @staticmethod - def backward(ctx, grad_output): - input, = ctx.saved_tensors - output = ext_module.left_pool_backward(input, grad_output) - return output - - -class RightPoolFunction(Function): - - @staticmethod - def symbolic(g, input): - output = g.op( - 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['right'])) - return output - - @staticmethod - def forward(ctx, input): - output = ext_module.right_pool_forward(input) - ctx.save_for_backward(input) - return output - - @staticmethod - def backward(ctx, grad_output): - input, = ctx.saved_tensors - output = ext_module.right_pool_backward(input, grad_output) - return output - - -class CornerPool(nn.Module): - """Corner Pooling. - - Corner Pooling is a new type of pooling layer that helps a - convolutional network better localize corners of bounding boxes. - - Please refer to https://arxiv.org/abs/1808.01244 for more details. - Code is modified from https://github.com/princeton-vl/CornerNet-Lite. - - Args: - mode(str): Pooling orientation for the pooling layer - - - 'bottom': Bottom Pooling - - 'left': Left Pooling - - 'right': Right Pooling - - 'top': Top Pooling - - Returns: - Feature map after pooling. 
- """ - - pool_functions = { - 'bottom': BottomPoolFunction, - 'left': LeftPoolFunction, - 'right': RightPoolFunction, - 'top': TopPoolFunction, - } - - cummax_dim_flip = { - 'bottom': (2, False), - 'left': (3, True), - 'right': (3, False), - 'top': (2, True), - } - - def __init__(self, mode): - super(CornerPool, self).__init__() - assert mode in self.pool_functions - self.mode = mode - self.corner_pool = self.pool_functions[mode] - - def forward(self, x): - if torch.__version__ != 'parrots' and torch.__version__ >= '1.5.0': - if torch.onnx.is_in_onnx_export(): - assert torch.__version__ >= '1.7.0', \ - 'When `cummax` serves as an intermediate component whose '\ - 'outputs is used as inputs for another modules, it\'s '\ - 'expected that pytorch version must be >= 1.7.0, '\ - 'otherwise Error appears like: `RuntimeError: tuple '\ - 'appears in op that does not forward tuples, unsupported '\ - 'kind: prim::PythonOp`.' - - dim, flip = self.cummax_dim_flip[self.mode] - if flip: - x = x.flip(dim) - pool_tensor, _ = torch.cummax(x, dim=dim) - if flip: - pool_tensor = pool_tensor.flip(dim) - return pool_tensor - else: - return self.corner_pool.apply(x) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/dense_heads/dense_test_mixins.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/dense_heads/dense_test_mixins.py deleted file mode 100644 index dd81364dec90e97c30a6e2220a5e0fe96373c5bd..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/dense_heads/dense_test_mixins.py +++ /dev/null @@ -1,100 +0,0 @@ -from inspect import signature - -import torch - -from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms - - -class BBoxTestMixin(object): - """Mixin class for test time augmentation of bboxes.""" - - def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): - """Merge augmented detection bboxes and scores. - - Args: - aug_bboxes (list[Tensor]): shape (n, 4*#class) - aug_scores (list[Tensor] or None): shape (n, #class) - img_shapes (list[Tensor]): shape (3, ). - - Returns: - tuple: (bboxes, scores) - """ - recovered_bboxes = [] - for bboxes, img_info in zip(aug_bboxes, img_metas): - img_shape = img_info[0]['img_shape'] - scale_factor = img_info[0]['scale_factor'] - flip = img_info[0]['flip'] - flip_direction = img_info[0]['flip_direction'] - bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, - flip_direction) - recovered_bboxes.append(bboxes) - bboxes = torch.cat(recovered_bboxes, dim=0) - if aug_scores is None: - return bboxes - else: - scores = torch.cat(aug_scores, dim=0) - return bboxes, scores - - def aug_test_bboxes(self, feats, img_metas, rescale=False): - """Test det bboxes with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. 
- - Returns: - list[ndarray]: bbox results of each class - """ - # check with_nms argument - gb_sig = signature(self.get_bboxes) - gb_args = [p.name for p in gb_sig.parameters.values()] - if hasattr(self, '_get_bboxes'): - gbs_sig = signature(self._get_bboxes) - else: - gbs_sig = signature(self._get_bboxes_single) - gbs_args = [p.name for p in gbs_sig.parameters.values()] - assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ - f'{self.__class__.__name__}' \ - ' does not support test-time augmentation' - - aug_bboxes = [] - aug_scores = [] - aug_factors = [] # score_factors for NMS - for x, img_meta in zip(feats, img_metas): - # only one image in the batch - outs = self.forward(x) - bbox_inputs = outs + (img_meta, self.test_cfg, False, False) - bbox_outputs = self.get_bboxes(*bbox_inputs)[0] - aug_bboxes.append(bbox_outputs[0]) - aug_scores.append(bbox_outputs[1]) - # bbox_outputs of some detectors (e.g., ATSS, FCOS, YOLOv3) - # contains additional element to adjust scores before NMS - if len(bbox_outputs) >= 3: - aug_factors.append(bbox_outputs[2]) - - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = self.merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas) - merged_factors = torch.cat(aug_factors, dim=0) if aug_factors else None - det_bboxes, det_labels = multiclass_nms( - merged_bboxes, - merged_scores, - self.test_cfg.score_thr, - self.test_cfg.nms, - self.test_cfg.max_per_img, - score_factors=merged_factors) - - if rescale: - _det_bboxes = det_bboxes - else: - _det_bboxes = det_bboxes.clone() - _det_bboxes[:, :4] *= det_bboxes.new_tensor( - img_metas[0][0]['scale_factor']) - bbox_results = bbox2result(_det_bboxes, det_labels, self.num_classes) - return bbox_results diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/sparse_rcnn.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/sparse_rcnn.py deleted file mode 100644 index 0dbd0250f189e610a0bbc72b0dab2559e26857ae..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/sparse_rcnn.py +++ /dev/null @@ -1,110 +0,0 @@ -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class SparseRCNN(TwoStageDetector): - r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with - Learnable Proposals `_""" - - def __init__(self, *args, **kwargs): - super(SparseRCNN, self).__init__(*args, **kwargs) - assert self.with_rpn, 'Sparse R-CNN do not support external proposals' - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - proposals=None, - **kwargs): - """Forward function of SparseR-CNN in train stage. - - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor): specify which bounding - boxes can be ignored when computing the loss. 
- gt_masks (List[Tensor], optional) : Segmentation masks for - each box. But we don't support it in this architecture. - proposals (List[Tensor], optional): override rpn proposals with - custom proposals. Use when `with_rpn` is False. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - - assert proposals is None, 'Sparse R-CNN does not support' \ - ' external proposals' - assert gt_masks is None, 'Sparse R-CNN does not instance segmentation' - - x = self.extract_feat(img) - proposal_boxes, proposal_features, imgs_whwh = \ - self.rpn_head.forward_train(x, img_metas) - roi_losses = self.roi_head.forward_train( - x, - proposal_boxes, - proposal_features, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=gt_bboxes_ignore, - gt_masks=gt_masks, - imgs_whwh=imgs_whwh) - return roi_losses - - def simple_test(self, img, img_metas, rescale=False): - """Test function without test time augmentation. - - Args: - imgs (list[torch.Tensor]): List of multiple images - img_metas (list[dict]): List of image information. - rescale (bool): Whether to rescale the results. - Defaults to False. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. - """ - x = self.extract_feat(img) - proposal_boxes, proposal_features, imgs_whwh = \ - self.rpn_head.simple_test_rpn(x, img_metas) - bbox_results = self.roi_head.simple_test( - x, - proposal_boxes, - proposal_features, - img_metas, - imgs_whwh=imgs_whwh, - rescale=rescale) - return bbox_results - - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - # backbone - x = self.extract_feat(img) - # rpn - num_imgs = len(img) - dummy_img_metas = [ - dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs) - ] - proposal_boxes, proposal_features, imgs_whwh = \ - self.rpn_head.simple_test_rpn(x, dummy_img_metas) - # roi_head - roi_outs = self.roi_head.forward_dummy(x, proposal_boxes, - proposal_features, - dummy_img_metas) - return roi_outs diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/fileio/handlers/json_handler.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/fileio/handlers/json_handler.py deleted file mode 100644 index 18d4f15f74139d20adff18b20be5529c592a66b6..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/fileio/handlers/json_handler.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json - -import numpy as np - -from .base import BaseFileHandler - - -def set_default(obj): - """Set default json values for non-serializable values. - - It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list. - It also converts ``np.generic`` (including ``np.int32``, ``np.float32``, - etc.) into plain numbers of plain python built-in types. 
- """ - if isinstance(obj, (set, range)): - return list(obj) - elif isinstance(obj, np.ndarray): - return obj.tolist() - elif isinstance(obj, np.generic): - return obj.item() - raise TypeError(f'{type(obj)} is unsupported for json dump') - - -class JsonHandler(BaseFileHandler): - - def load_from_fileobj(self, file): - return json.load(file) - - def dump_to_fileobj(self, obj, file, **kwargs): - kwargs.setdefault('default', set_default) - json.dump(obj, file, **kwargs) - - def dump_to_str(self, obj, **kwargs): - kwargs.setdefault('default', set_default) - return json.dumps(obj, **kwargs) diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/window/cocoa/systemcursor.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/window/cocoa/systemcursor.py deleted file mode 100644 index a615792f51d6ee4c294d14b10a46478ccede456d..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/window/cocoa/systemcursor.py +++ /dev/null @@ -1,21 +0,0 @@ -from pyglet.libs.darwin import cocoapy - - -# This class is a wrapper around NSCursor which prevents us from -# sending too many hide or unhide messages in a row. Apparently -# NSCursor treats them like retain/release messages, which can be -# problematic when we are e.g. switching between window & fullscreen. -class SystemCursor: - cursor_is_hidden = False - - @classmethod - def hide(cls): - if not cls.cursor_is_hidden: - cocoapy.send_message('NSCursor', 'hide') - cls.cursor_is_hidden = True - - @classmethod - def unhide(cls): - if cls.cursor_is_hidden: - cocoapy.send_message('NSCursor', 'unhide') - cls.cursor_is_hidden = False diff --git a/spaces/ahuss/pet/app.py b/spaces/ahuss/pet/app.py deleted file mode 100644 index 9494385f8aaa26e1e4fc7b18c4815d68f4c784c0..0000000000000000000000000000000000000000 --- a/spaces/ahuss/pet/app.py +++ /dev/null @@ -1,36 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../app.ipynb. 
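# --- Editorial sketch (not part of the original files) ----------------------
# Self-contained illustration of the `default=` hook idea behind `set_default`
# in json_handler.py further up: numpy containers become lists and numpy
# scalars become plain Python numbers before json serialisation.
import json
import numpy as np

def np_default(obj):
    # set/range -> list, ndarray -> nested list, numpy scalar -> Python number
    if isinstance(obj, (set, range)):
        return list(obj)
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, np.generic):
        return obj.item()
    raise TypeError(f'{type(obj)} is unsupported for json dump')

print(json.dumps({'ids': np.arange(3), 'score': np.float32(0.5)}, default=np_default))
# prints: {"ids": [0, 1, 2], "score": 0.5}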
- -# %% auto 0 -__all__ = ['temp', 'learn', 'categories', 'image', - 'label', 'examples', 'intf', 'is_cat', 'classify'] - -# %% ../app.ipynb 5 -import pathlib -from fastai.vision.all import * -import gradio as gr - - -def is_cat(x): return x[0].is_upper() - - -learn = load_learner('model_dog-vs-cat') - - -# %% ../app.ipynb 7 -categories = ('Dog', 'Cat') - - -def classify(image): - pred, adx, probs = learn.predict(image) - return dict(zip(categories, map(float, probs))) - - -# %% ../app.ipynb 8 -image = gr.inputs.Image(shape=(192, 192)) -label = gr.outputs.Label() -examples = ['dog (4).jpg', 'cat (2).jpg', 'dogcat2.jpeg'] -title = "Katt eller Hund" - -intf = gr.Interface(fn=classify, inputs=image, - outputs=label,title=title, examples=examples) -intf.launch(inline=False) diff --git a/spaces/akhaliq/Music_Source_Separation/bytesep/dataset_creation/create_evaluation_audios/violin-piano.py b/spaces/akhaliq/Music_Source_Separation/bytesep/dataset_creation/create_evaluation_audios/violin-piano.py deleted file mode 100644 index da36f43553f507c1b980fff826d443cdec113aa6..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Music_Source_Separation/bytesep/dataset_creation/create_evaluation_audios/violin-piano.py +++ /dev/null @@ -1,162 +0,0 @@ -import argparse -import os -from typing import NoReturn - -import librosa -import numpy as np -import soundfile - -from bytesep.dataset_creation.pack_audios_to_hdf5s.instruments_solo import ( - read_csv as read_instruments_solo_csv, -) -from bytesep.dataset_creation.pack_audios_to_hdf5s.maestro import ( - read_csv as read_maestro_csv, -) -from bytesep.utils import load_random_segment - - -def create_evaluation(args) -> NoReturn: - r"""Random mix and write out audios for evaluation. - - Args: - violin_dataset_dir: str, the directory of the violin dataset - piano_dataset_dir: str, the directory of the piano dataset - evaluation_audios_dir: str, the directory to write out randomly selected and mixed audio segments - sample_rate: int - channels: int, e.g., 1 | 2 - evaluation_segments_num: int - mono: bool - - Returns: - NoReturn - """ - - # arguments & parameters - violin_dataset_dir = args.violin_dataset_dir - piano_dataset_dir = args.piano_dataset_dir - evaluation_audios_dir = args.evaluation_audios_dir - sample_rate = args.sample_rate - channels = args.channels - evaluation_segments_num = args.evaluation_segments_num - mono = True if channels == 1 else False - - split = 'test' - segment_seconds = 10.0 - - random_state = np.random.RandomState(1234) - - violin_meta_csv = os.path.join(violin_dataset_dir, 'validation.csv') - violin_names_dict = read_instruments_solo_csv(violin_meta_csv) - violin_audio_names = violin_names_dict['{}'.format(split)] - - piano_meta_csv = os.path.join(piano_dataset_dir, 'maestro-v2.0.0.csv') - piano_names_dict = read_maestro_csv(piano_meta_csv) - piano_audio_names = piano_names_dict['{}'.format(split)] - - for source_type in ['violin', 'piano', 'mixture']: - output_dir = os.path.join(evaluation_audios_dir, split, source_type) - os.makedirs(output_dir, exist_ok=True) - - for n in range(evaluation_segments_num): - - print('{} / {}'.format(n, evaluation_segments_num)) - - # Randomly select and write out a clean violin segment. 
- violin_audio_name = random_state.choice(violin_audio_names) - violin_audio_path = os.path.join(violin_dataset_dir, "mp3s", violin_audio_name) - - violin_audio = load_random_segment( - audio_path=violin_audio_path, - random_state=random_state, - segment_seconds=segment_seconds, - mono=mono, - sample_rate=sample_rate, - ) - # (channels_num, audio_samples) - - output_violin_path = os.path.join( - evaluation_audios_dir, split, 'violin', '{:04d}.wav'.format(n) - ) - soundfile.write( - file=output_violin_path, data=violin_audio.T, samplerate=sample_rate - ) - print("Write out to {}".format(output_violin_path)) - - # Randomly select and write out a clean piano segment. - piano_audio_name = random_state.choice(piano_audio_names) - piano_audio_path = os.path.join(piano_dataset_dir, piano_audio_name) - - piano_audio = load_random_segment( - audio_path=piano_audio_path, - random_state=random_state, - segment_seconds=segment_seconds, - mono=mono, - sample_rate=sample_rate, - ) - # (channels_num, audio_samples) - - output_piano_path = os.path.join( - evaluation_audios_dir, split, 'piano', '{:04d}.wav'.format(n) - ) - soundfile.write( - file=output_piano_path, data=piano_audio.T, samplerate=sample_rate - ) - print("Write out to {}".format(output_piano_path)) - - # Mix violin and piano segments and write out a mixture segment. - mixture_audio = violin_audio + piano_audio - # (channels_num, audio_samples) - - output_mixture_path = os.path.join( - evaluation_audios_dir, split, 'mixture', '{:04d}.wav'.format(n) - ) - soundfile.write( - file=output_mixture_path, data=mixture_audio.T, samplerate=sample_rate - ) - print("Write out to {}".format(output_mixture_path)) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--violin_dataset_dir", - type=str, - required=True, - help="The directory of the violin dataset.", - ) - parser.add_argument( - "--piano_dataset_dir", - type=str, - required=True, - help="The directory of the piano dataset.", - ) - parser.add_argument( - "--evaluation_audios_dir", - type=str, - required=True, - help="The directory to write out randomly selected and mixed audio segments.", - ) - parser.add_argument( - "--sample_rate", - type=int, - required=True, - help="Sample rate", - ) - parser.add_argument( - "--channels", - type=int, - required=True, - help="Audio channels, e.g, 1 or 2.", - ) - parser.add_argument( - "--evaluation_segments_num", - type=int, - required=True, - help="The number of segments to create for evaluation.", - ) - - # Parse arguments. - args = parser.parse_args() - - create_evaluation(args) diff --git a/spaces/akhaliq/Music_Source_Separation/scripts/1_pack_audios_to_hdf5s/maestro/sr=44100,chn=2.sh b/spaces/akhaliq/Music_Source_Separation/scripts/1_pack_audios_to_hdf5s/maestro/sr=44100,chn=2.sh deleted file mode 100644 index 05c239fb85749261920f85f4ccf70641f9f67546..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Music_Source_Separation/scripts/1_pack_audios_to_hdf5s/maestro/sr=44100,chn=2.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -DATASET_DIR=${1:-"./datasets/maestro"} # The first argument is dataset directory. -WORKSPACE=${2:-"./workspaces/bytesep"} # The second argument is workspace directory. - -echo "DATASET_DIR=${DATASET_DIR}" -echo "WORKSPACE=${WORKSPACE}" - -# Users can change the following settings. 
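# --- Editorial note (not part of the original script) -----------------------
# Example invocation with explicit positional arguments; both paths below are
# simply the script's own defaults, repeated here for illustration:
#
#   bash scripts/1_pack_audios_to_hdf5s/maestro/sr=44100,chn=2.sh \
#       ./datasets/maestro ./workspaces/bytesep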
-SAMPLE_RATE=44100 -CHANNELS=2 - -# Paths -HDF5S_DIR="${WORKSPACE}/hdf5s/maestro/sr=${SAMPLE_RATE}_chn=${CHANNELS}/train" - -python3 bytesep/dataset_creation/pack_audios_to_hdf5s/maestro.py \ - --dataset_dir=$DATASET_DIR \ - --split="train" \ - --hdf5s_dir=$HDF5S_DIR \ - --sample_rate=$SAMPLE_RATE \ - --channels=$CHANNELS \ No newline at end of file diff --git a/spaces/akhaliq/stylegan3_clip/torch_utils/ops/bias_act.py b/spaces/akhaliq/stylegan3_clip/torch_utils/ops/bias_act.py deleted file mode 100644 index af13db543c63934d902c08f1a957bee6012e1f27..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/stylegan3_clip/torch_utils/ops/bias_act.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom PyTorch ops for efficient bias and activation.""" - -import os -import numpy as np -import torch -import dnnlib - -from .. import custom_ops -from .. import misc - -#---------------------------------------------------------------------------- - -activation_funcs = { - 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False), - 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False), - 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False), - 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True), - 'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True), - 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True), - 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True), - 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True), - 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True), -} - -#---------------------------------------------------------------------------- - -_plugin = None -_null_tensor = torch.empty([0]) - -def _init(): - global _plugin - if _plugin is None: - _plugin = custom_ops.get_plugin( - module_name='bias_act_plugin', - sources=['bias_act.cpp', 'bias_act.cu'], - headers=['bias_act.h'], - source_dir=os.path.dirname(__file__), - extra_cuda_cflags=['--use_fast_math'], - ) - return True - -#---------------------------------------------------------------------------- - -def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'): - r"""Fused bias and activation function. - - Adds bias `b` to activation tensor `x`, evaluates activation function `act`, - and scales the result by `gain`. Each of the steps is optional. 
In most cases, - the fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports first and second order gradients, - but not third order gradients. - - Args: - x: Input activation tensor. Can be of any shape. - b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type - as `x`. The shape must be known, and it must match the dimension of `x` - corresponding to `dim`. - dim: The dimension in `x` corresponding to the elements of `b`. - The value of `dim` is ignored if `b` is not specified. - act: Name of the activation function to evaluate, or `"linear"` to disable. - Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc. - See `activation_funcs` for a full list. `None` is not allowed. - alpha: Shape parameter for the activation function, or `None` to use the default. - gain: Scaling factor for the output tensor, or `None` to use default. - See `activation_funcs` for the default scaling of each activation function. - If unsure, consider specifying 1. - clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable - the clamping (default). - impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). - - Returns: - Tensor of the same shape and datatype as `x`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b) - return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None): - """Slow reference implementation of `bias_act()` using standard TensorFlow ops. - """ - assert isinstance(x, torch.Tensor) - assert clamp is None or clamp >= 0 - spec = activation_funcs[act] - alpha = float(alpha if alpha is not None else spec.def_alpha) - gain = float(gain if gain is not None else spec.def_gain) - clamp = float(clamp if clamp is not None else -1) - - # Add bias. - if b is not None: - assert isinstance(b, torch.Tensor) and b.ndim == 1 - assert 0 <= dim < x.ndim - assert b.shape[0] == x.shape[dim] - x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)]) - - # Evaluate activation function. - alpha = float(alpha) - x = spec.func(x, alpha=alpha) - - # Scale by gain. - gain = float(gain) - if gain != 1: - x = x * gain - - # Clamp. - if clamp >= 0: - x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type - return x - -#---------------------------------------------------------------------------- - -_bias_act_cuda_cache = dict() - -def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None): - """Fast CUDA implementation of `bias_act()` using custom ops. - """ - # Parse arguments. - assert clamp is None or clamp >= 0 - spec = activation_funcs[act] - alpha = float(alpha if alpha is not None else spec.def_alpha) - gain = float(gain if gain is not None else spec.def_gain) - clamp = float(clamp if clamp is not None else -1) - - # Lookup from cache. - key = (dim, act, alpha, gain, clamp) - if key in _bias_act_cuda_cache: - return _bias_act_cuda_cache[key] - - # Forward op. 
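# --- Editorial note (not part of the original file) -------------------------
# The custom CUDA op defined below must match `_bias_act_ref` above. For
# example, with act='lrelu', dim=1 and the default alpha=0.2 / gain=sqrt(2),
# the reference path reduces to plain PyTorch (shapes illustrative only):
#
#     x = torch.randn(4, 8, 16, 16)
#     b = torch.randn(8)
#     y_ref = torch.nn.functional.leaky_relu(
#         x + b.reshape(1, -1, 1, 1), negative_slope=0.2) * (2 ** 0.5)
#     # bias_act(x, b, act='lrelu', impl='ref') is expected to equal y_ref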
- class BiasActCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, b): # pylint: disable=arguments-differ - ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride(1) == 1 else torch.contiguous_format - x = x.contiguous(memory_format=ctx.memory_format) - b = b.contiguous() if b is not None else _null_tensor - y = x - if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor: - y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp) - ctx.save_for_backward( - x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, - b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, - y if 'y' in spec.ref else _null_tensor) - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - dy = dy.contiguous(memory_format=ctx.memory_format) - x, b, y = ctx.saved_tensors - dx = None - db = None - - if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: - dx = dy - if act != 'linear' or gain != 1 or clamp >= 0: - dx = BiasActCudaGrad.apply(dy, x, b, y) - - if ctx.needs_input_grad[1]: - db = dx.sum([i for i in range(dx.ndim) if i != dim]) - - return dx, db - - # Backward op. - class BiasActCudaGrad(torch.autograd.Function): - @staticmethod - def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ - ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride(1) == 1 else torch.contiguous_format - dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp) - ctx.save_for_backward( - dy if spec.has_2nd_grad else _null_tensor, - x, b, y) - return dx - - @staticmethod - def backward(ctx, d_dx): # pylint: disable=arguments-differ - d_dx = d_dx.contiguous(memory_format=ctx.memory_format) - dy, x, b, y = ctx.saved_tensors - d_dy = None - d_x = None - d_b = None - d_y = None - - if ctx.needs_input_grad[0]: - d_dy = BiasActCudaGrad.apply(d_dx, x, b, y) - - if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]): - d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp) - - if spec.has_2nd_grad and ctx.needs_input_grad[2]: - d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim]) - - return d_dy, d_x, d_b, d_y - - # Add to cache. 
- _bias_act_cuda_cache[key] = BiasActCuda - return BiasActCuda - -#---------------------------------------------------------------------------- diff --git a/spaces/akiraaaaaa/Waifu-Reina/infer_pack/models_onnx.py b/spaces/akiraaaaaa/Waifu-Reina/infer_pack/models_onnx.py deleted file mode 100644 index b945eac8e59aac38fbd166da49eda01e2b8f4bd4..0000000000000000000000000000000000000000 --- a/spaces/akiraaaaaa/Waifu-Reina/infer_pack/models_onnx.py +++ /dev/null @@ -1,818 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, 
logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - 
x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return 
sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = 
self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if self.gin_channels == 256: - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, 
H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, 
- 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/help.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/help.py deleted file mode 100644 index 62066318b74dcc5c32bcd24b9493fb34d1ce52d7..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/help.py +++ /dev/null @@ -1,41 +0,0 @@ -from optparse import Values -from typing import List - -from pip._internal.cli.base_command import Command -from pip._internal.cli.status_codes import SUCCESS -from pip._internal.exceptions import CommandError - - -class HelpCommand(Command): - """Show help for commands""" - - usage = """ - %prog """ - ignore_require_venv = True - - def run(self, options: Values, args: List[str]) -> int: - from pip._internal.commands import ( - commands_dict, - create_command, - get_similar_commands, - ) - - try: - # 'pip help' with no args is handled by pip.__init__.parseopt() - cmd_name = args[0] # the command we need help for - except IndexError: - return SUCCESS - - if cmd_name not in commands_dict: - guess = get_similar_commands(cmd_name) - - msg = [f'unknown command "{cmd_name}"'] - if guess: - msg.append(f'maybe you meant "{guess}"') - - raise CommandError(" - ".join(msg)) - - command = create_command(cmd_name) - command.parser.print_help() - - return SUCCESS diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cp949prober.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cp949prober.py deleted file mode 100644 index efd793abca4bf496001a4e46a67557e5a6f16bba..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cp949prober.py +++ /dev/null @@ -1,49 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .chardistribution import EUCKRDistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import CP949_SM_MODEL - - -class CP949Prober(MultiByteCharSetProber): - def __init__(self): - super(CP949Prober, self).__init__() - self.coding_sm = CodingStateMachine(CP949_SM_MODEL) - # NOTE: CP949 is a superset of EUC-KR, so the distribution should be - # not different. - self.distribution_analyzer = EUCKRDistributionAnalysis() - self.reset() - - @property - def charset_name(self): - return "CP949" - - @property - def language(self): - return "Korean" diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/resolvelib/resolvers.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/resolvelib/resolvers.py deleted file mode 100644 index 787681b03e9ec2fd4490de10cdc95e58c893c8b5..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/resolvelib/resolvers.py +++ /dev/null @@ -1,482 +0,0 @@ -import collections -import operator - -from .providers import AbstractResolver -from .structs import DirectedGraph, IteratorMapping, build_iter_view - -RequirementInformation = collections.namedtuple( - "RequirementInformation", ["requirement", "parent"] -) - - -class ResolverException(Exception): - """A base class for all exceptions raised by this module. - - Exceptions derived by this class should all be handled in this module. Any - bubbling pass the resolver should be treated as a bug. - """ - - -class RequirementsConflicted(ResolverException): - def __init__(self, criterion): - super(RequirementsConflicted, self).__init__(criterion) - self.criterion = criterion - - def __str__(self): - return "Requirements conflict: {}".format( - ", ".join(repr(r) for r in self.criterion.iter_requirement()), - ) - - -class InconsistentCandidate(ResolverException): - def __init__(self, candidate, criterion): - super(InconsistentCandidate, self).__init__(candidate, criterion) - self.candidate = candidate - self.criterion = criterion - - def __str__(self): - return "Provided candidate {!r} does not satisfy {}".format( - self.candidate, - ", ".join(repr(r) for r in self.criterion.iter_requirement()), - ) - - -class Criterion(object): - """Representation of possible resolution results of a package. - - This holds three attributes: - - * `information` is a collection of `RequirementInformation` pairs. - Each pair is a requirement contributing to this criterion, and the - candidate that provides the requirement. - * `incompatibilities` is a collection of all known not-to-work candidates - to exclude from consideration. - * `candidates` is a collection containing all possible candidates deducted - from the union of contributing requirements and known incompatibilities. 
- It should never be empty, except when the criterion is an attribute of a - raised `RequirementsConflicted` (in which case it is always empty). - - .. note:: - This class is intended to be externally immutable. **Do not** mutate - any of its attribute containers. - """ - - def __init__(self, candidates, information, incompatibilities): - self.candidates = candidates - self.information = information - self.incompatibilities = incompatibilities - - def __repr__(self): - requirements = ", ".join( - "({!r}, via={!r})".format(req, parent) - for req, parent in self.information - ) - return "Criterion({})".format(requirements) - - def iter_requirement(self): - return (i.requirement for i in self.information) - - def iter_parent(self): - return (i.parent for i in self.information) - - -class ResolutionError(ResolverException): - pass - - -class ResolutionImpossible(ResolutionError): - def __init__(self, causes): - super(ResolutionImpossible, self).__init__(causes) - # causes is a list of RequirementInformation objects - self.causes = causes - - -class ResolutionTooDeep(ResolutionError): - def __init__(self, round_count): - super(ResolutionTooDeep, self).__init__(round_count) - self.round_count = round_count - - -# Resolution state in a round. -State = collections.namedtuple("State", "mapping criteria backtrack_causes") - - -class Resolution(object): - """Stateful resolution object. - - This is designed as a one-off object that holds information to kick start - the resolution process, and holds the results afterwards. - """ - - def __init__(self, provider, reporter): - self._p = provider - self._r = reporter - self._states = [] - - @property - def state(self): - try: - return self._states[-1] - except IndexError: - raise AttributeError("state") - - def _push_new_state(self): - """Push a new state into history. - - This new state will be used to hold resolution results of the next - coming round. 
- """ - base = self._states[-1] - state = State( - mapping=base.mapping.copy(), - criteria=base.criteria.copy(), - backtrack_causes=base.backtrack_causes[:], - ) - self._states.append(state) - - def _add_to_criteria(self, criteria, requirement, parent): - self._r.adding_requirement(requirement=requirement, parent=parent) - - identifier = self._p.identify(requirement_or_candidate=requirement) - criterion = criteria.get(identifier) - if criterion: - incompatibilities = list(criterion.incompatibilities) - else: - incompatibilities = [] - - matches = self._p.find_matches( - identifier=identifier, - requirements=IteratorMapping( - criteria, - operator.methodcaller("iter_requirement"), - {identifier: [requirement]}, - ), - incompatibilities=IteratorMapping( - criteria, - operator.attrgetter("incompatibilities"), - {identifier: incompatibilities}, - ), - ) - - if criterion: - information = list(criterion.information) - information.append(RequirementInformation(requirement, parent)) - else: - information = [RequirementInformation(requirement, parent)] - - criterion = Criterion( - candidates=build_iter_view(matches), - information=information, - incompatibilities=incompatibilities, - ) - if not criterion.candidates: - raise RequirementsConflicted(criterion) - criteria[identifier] = criterion - - def _get_preference(self, name): - return self._p.get_preference( - identifier=name, - resolutions=self.state.mapping, - candidates=IteratorMapping( - self.state.criteria, - operator.attrgetter("candidates"), - ), - information=IteratorMapping( - self.state.criteria, - operator.attrgetter("information"), - ), - backtrack_causes=self.state.backtrack_causes, - ) - - def _is_current_pin_satisfying(self, name, criterion): - try: - current_pin = self.state.mapping[name] - except KeyError: - return False - return all( - self._p.is_satisfied_by(requirement=r, candidate=current_pin) - for r in criterion.iter_requirement() - ) - - def _get_updated_criteria(self, candidate): - criteria = self.state.criteria.copy() - for requirement in self._p.get_dependencies(candidate=candidate): - self._add_to_criteria(criteria, requirement, parent=candidate) - return criteria - - def _attempt_to_pin_criterion(self, name): - criterion = self.state.criteria[name] - - causes = [] - for candidate in criterion.candidates: - try: - criteria = self._get_updated_criteria(candidate) - except RequirementsConflicted as e: - causes.append(e.criterion) - continue - - # Check the newly-pinned candidate actually works. This should - # always pass under normal circumstances, but in the case of a - # faulty provider, we will raise an error to notify the implementer - # to fix find_matches() and/or is_satisfied_by(). - satisfied = all( - self._p.is_satisfied_by(requirement=r, candidate=candidate) - for r in criterion.iter_requirement() - ) - if not satisfied: - raise InconsistentCandidate(candidate, criterion) - - self._r.pinning(candidate=candidate) - self.state.criteria.update(criteria) - - # Put newly-pinned candidate at the end. This is essential because - # backtracking looks at this mapping to get the last pin. - self.state.mapping.pop(name, None) - self.state.mapping[name] = candidate - - return [] - - # All candidates tried, nothing works. This criterion is a dead - # end, signal for backtracking. - return causes - - def _backtrack(self): - """Perform backtracking. - - When we enter here, the stack is like this:: - - [ state Z ] - [ state Y ] - [ state X ] - .... earlier states are irrelevant. - - 1. 
No pins worked for Z, so it does not have a pin. - 2. We want to reset state Y to unpinned, and pin another candidate. - 3. State X holds what state Y was before the pin, but does not - have the incompatibility information gathered in state Y. - - Each iteration of the loop will: - - 1. Discard Z. - 2. Discard Y but remember its incompatibility information gathered - previously, and the failure we're dealing with right now. - 3. Push a new state Y' based on X, and apply the incompatibility - information from Y to Y'. - 4a. If this causes Y' to conflict, we need to backtrack again. Make Y' - the new Z and go back to step 2. - 4b. If the incompatibilities apply cleanly, end backtracking. - """ - while len(self._states) >= 3: - # Remove the state that triggered backtracking. - del self._states[-1] - - # Retrieve the last candidate pin and known incompatibilities. - broken_state = self._states.pop() - name, candidate = broken_state.mapping.popitem() - incompatibilities_from_broken = [ - (k, list(v.incompatibilities)) - for k, v in broken_state.criteria.items() - ] - - # Also mark the newly known incompatibility. - incompatibilities_from_broken.append((name, [candidate])) - - self._r.backtracking(candidate=candidate) - - # Create a new state from the last known-to-work one, and apply - # the previously gathered incompatibility information. - def _patch_criteria(): - for k, incompatibilities in incompatibilities_from_broken: - if not incompatibilities: - continue - try: - criterion = self.state.criteria[k] - except KeyError: - continue - matches = self._p.find_matches( - identifier=k, - requirements=IteratorMapping( - self.state.criteria, - operator.methodcaller("iter_requirement"), - ), - incompatibilities=IteratorMapping( - self.state.criteria, - operator.attrgetter("incompatibilities"), - {k: incompatibilities}, - ), - ) - candidates = build_iter_view(matches) - if not candidates: - return False - incompatibilities.extend(criterion.incompatibilities) - self.state.criteria[k] = Criterion( - candidates=candidates, - information=list(criterion.information), - incompatibilities=incompatibilities, - ) - return True - - self._push_new_state() - success = _patch_criteria() - - # It works! Let's work on this new state. - if success: - return True - - # State does not work after applying known incompatibilities. - # Try the still previous state. - - # No way to backtrack anymore. - return False - - def resolve(self, requirements, max_rounds): - if self._states: - raise RuntimeError("already resolved") - - self._r.starting() - - # Initialize the root state. - self._states = [ - State( - mapping=collections.OrderedDict(), - criteria={}, - backtrack_causes=[], - ) - ] - for r in requirements: - try: - self._add_to_criteria(self.state.criteria, r, parent=None) - except RequirementsConflicted as e: - raise ResolutionImpossible(e.criterion.information) - - # The root state is saved as a sentinel so the first ever pin can have - # something to backtrack to if it fails. The root state is basically - # pinning the virtual "root" package in the graph. - self._push_new_state() - - for round_index in range(max_rounds): - self._r.starting_round(index=round_index) - - unsatisfied_names = [ - key - for key, criterion in self.state.criteria.items() - if not self._is_current_pin_satisfying(key, criterion) - ] - - # All criteria are accounted for. Nothing more to pin, we are done! 
- if not unsatisfied_names: - self._r.ending(state=self.state) - return self.state - - # Choose the most preferred unpinned criterion to try. - name = min(unsatisfied_names, key=self._get_preference) - failure_causes = self._attempt_to_pin_criterion(name) - - if failure_causes: - causes = [i for c in failure_causes for i in c.information] - # Backtrack if pinning fails. The backtrack process puts us in - # an unpinned state, so we can work on it in the next round. - self._r.resolving_conflicts(causes=causes) - success = self._backtrack() - self.state.backtrack_causes[:] = causes - - # Dead ends everywhere. Give up. - if not success: - raise ResolutionImpossible(self.state.backtrack_causes) - else: - # Pinning was successful. Push a new state to do another pin. - self._push_new_state() - - self._r.ending_round(index=round_index, state=self.state) - - raise ResolutionTooDeep(max_rounds) - - -def _has_route_to_root(criteria, key, all_keys, connected): - if key in connected: - return True - if key not in criteria: - return False - for p in criteria[key].iter_parent(): - try: - pkey = all_keys[id(p)] - except KeyError: - continue - if pkey in connected: - connected.add(key) - return True - if _has_route_to_root(criteria, pkey, all_keys, connected): - connected.add(key) - return True - return False - - -Result = collections.namedtuple("Result", "mapping graph criteria") - - -def _build_result(state): - mapping = state.mapping - all_keys = {id(v): k for k, v in mapping.items()} - all_keys[id(None)] = None - - graph = DirectedGraph() - graph.add(None) # Sentinel as root dependencies' parent. - - connected = {None} - for key, criterion in state.criteria.items(): - if not _has_route_to_root(state.criteria, key, all_keys, connected): - continue - if key not in graph: - graph.add(key) - for p in criterion.iter_parent(): - try: - pkey = all_keys[id(p)] - except KeyError: - continue - if pkey not in graph: - graph.add(pkey) - graph.connect(pkey, key) - - return Result( - mapping={k: v for k, v in mapping.items() if k in connected}, - graph=graph, - criteria=state.criteria, - ) - - -class Resolver(AbstractResolver): - """The thing that performs the actual resolution work.""" - - base_exception = ResolverException - - def resolve(self, requirements, max_rounds=100): - """Take a collection of constraints, spit out the resolution result. - - The return value is a representation to the final resolution result. It - is a tuple subclass with three public members: - - * `mapping`: A dict of resolved candidates. Each key is an identifier - of a requirement (as returned by the provider's `identify` method), - and the value is the resolved candidate. - * `graph`: A `DirectedGraph` instance representing the dependency tree. - The vertices are keys of `mapping`, and each edge represents *why* - a particular package is included. A special vertex `None` is - included to represent parents of user-supplied requirements. - * `criteria`: A dict of "criteria" that hold detailed information on - how edges in the graph are derived. Each key is an identifier of a - requirement, and the value is a `Criterion` instance. - - The following exceptions may be raised if a resolution cannot be found: - - * `ResolutionImpossible`: A resolution cannot be found for the given - combination of requirements. The `causes` attribute of the - exception is a list of (requirement, parent), giving the - requirements that could not be satisfied. - * `ResolutionTooDeep`: The dependency tree is too deeply nested and - the resolver gave up. 
This is usually caused by a circular - dependency, but you can try to resolve this by increasing the - `max_rounds` argument. - """ - resolution = Resolution(self.provider, self.reporter) - state = resolution.resolve(requirements, max_rounds=max_rounds) - return _build_result(state) diff --git a/spaces/allknowingroger/Image-Models-Test9/app.py b/spaces/allknowingroger/Image-Models-Test9/app.py deleted file mode 100644 index 4d735033650242d82a9c36070141c21ebc1f8b98..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test9/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "jbilcke-hf/sdxl-joker", - "Jinouga/andy-raconte", - "spitfire4794/photo", - "CozerTechnology/CZR3D", - "Jinouga/brie-larson-v2", - "LieDeath/MergeStove2.5D", - "gfalcao/smkfr24jun", - "Jinouga/kilira-cooperv2", - "WALIDALI/bekclothes", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - 
runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/allknowingroger/huggingface/assets/index-fcdbd030.js b/spaces/allknowingroger/huggingface/assets/index-fcdbd030.js deleted file mode 100644 index 898f5a012f8e2d920a01209575d5037d7c3b0ee7..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/huggingface/assets/index-fcdbd030.js +++ /dev/null @@ -1,41 +0,0 @@ -var $c=Object.defineProperty;var Uc=(e,t,n)=>t in e?$c(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var yn=(e,t,n)=>(Uc(e,typeof t!="symbol"?t+"":t,n),n);(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))r(l);new MutationObserver(l=>{for(const o of l)if(o.type==="childList")for(const i of o.addedNodes)i.tagName==="LINK"&&i.rel==="modulepreload"&&r(i)}).observe(document,{childList:!0,subtree:!0});function n(l){const o={};return l.integrity&&(o.integrity=l.integrity),l.referrerPolicy&&(o.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?o.credentials="include":l.crossOrigin==="anonymous"?o.credentials="omit":o.credentials="same-origin",o}function r(l){if(l.ep)return;l.ep=!0;const o=n(l);fetch(l.href,o)}})();var bu={exports:{}},ul={},es={exports:{}},I={};/** - * @license React - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var tr=Symbol.for("react.element"),Vc=Symbol.for("react.portal"),Bc=Symbol.for("react.fragment"),Qc=Symbol.for("react.strict_mode"),Hc=Symbol.for("react.profiler"),Wc=Symbol.for("react.provider"),Kc=Symbol.for("react.context"),Yc=Symbol.for("react.forward_ref"),Xc=Symbol.for("react.suspense"),Gc=Symbol.for("react.memo"),Zc=Symbol.for("react.lazy"),Qi=Symbol.iterator;function qc(e){return e===null||typeof e!="object"?null:(e=Qi&&e[Qi]||e["@@iterator"],typeof e=="function"?e:null)}var ts={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},ns=Object.assign,rs={};function cn(e,t,n){this.props=e,this.context=t,this.refs=rs,this.updater=n||ts}cn.prototype.isReactComponent={};cn.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};cn.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function ls(){}ls.prototype=cn.prototype;function Ko(e,t,n){this.props=e,this.context=t,this.refs=rs,this.updater=n||ts}var Yo=Ko.prototype=new ls;Yo.constructor=Ko;ns(Yo,cn.prototype);Yo.isPureReactComponent=!0;var Hi=Array.isArray,os=Object.prototype.hasOwnProperty,Xo={current:null},is={key:!0,ref:!0,__self:!0,__source:!0};function us(e,t,n){var r,l={},o=null,i=null;if(t!=null)for(r in t.ref!==void 0&&(i=t.ref),t.key!==void 0&&(o=""+t.key),t)os.call(t,r)&&!is.hasOwnProperty(r)&&(l[r]=t[r]);var u=arguments.length-2;if(u===1)l.children=n;else if(1>>1,te=j[G];if(0>>1;Gl(jl,z))ktl(ur,jl)?(j[G]=ur,j[kt]=z,G=kt):(j[G]=jl,j[xt]=z,G=xt);else if(ktl(ur,z))j[G]=ur,j[kt]=z,G=kt;else break e}}return L}function l(j,L){var z=j.sortIndex-L.sortIndex;return z!==0?z:j.id-L.id}if(typeof performance=="object"&&typeof performance.now=="function"){var o=performance;e.unstable_now=function(){return o.now()}}else{var i=Date,u=i.now();e.unstable_now=function(){return i.now()-u}}var s=[],f=[],h=1,c=null,v=3,g=!1,w=!1,k=!1,M=typeof setTimeout=="function"?setTimeout:null,m=typeof clearTimeout=="function"?clearTimeout:null,d=typeof setImmediate<"u"?setImmediate:null;typeof navigator<"u"&&navigator.scheduling!==void 0&&navigator.scheduling.isInputPending!==void 0&&navigator.scheduling.isInputPending.bind(navigator.scheduling);function y(j){for(var L=n(f);L!==null;){if(L.callback===null)r(f);else if(L.startTime<=j)r(f),L.sortIndex=L.expirationTime,t(s,L);else break;L=n(f)}}function S(j){if(k=!1,y(j),!w)if(n(s)!==null)w=!0,El(C);else{var L=n(f);L!==null&&Cl(S,L.startTime-j)}}function C(j,L){w=!1,k&&(k=!1,m(T),T=-1),g=!0;var z=v;try{for(y(L),c=n(s);c!==null&&(!(c.expirationTime>L)||j&&!ze());){var G=c.callback;if(typeof G=="function"){c.callback=null,v=c.priorityLevel;var te=G(c.expirationTime<=L);L=e.unstable_now(),typeof te=="function"?c.callback=te:c===n(s)&&r(s),y(L)}else r(s);c=n(s)}if(c!==null)var ir=!0;else{var xt=n(f);xt!==null&&Cl(S,xt.startTime-L),ir=!1}return ir}finally{c=null,v=z,g=!1}}var _=!1,N=null,T=-1,X=5,F=-1;function ze(){return!(e.unstable_now()-Fj||125G?(j.sortIndex=z,t(f,j),n(s)===null&&j===n(f)&&(k?(m(T),T=-1):k=!0,Cl(S,z-G))):(j.sortIndex=te,t(s,j),w||g||(w=!0,El(C))),j},e.unstable_shouldYield=ze,e.unstable_wrapCallback=function(j){var L=v;return function(){var z=v;v=L;try{return j.apply(this,arguments)}finally{v=z}}}})(fs);cs.exports=fs;var af=cs.exports;/** - * @license React - * 
react-dom.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var ds=p,Ee=af;function x(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),bl=Object.prototype.hasOwnProperty,cf=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,Ki={},Yi={};function ff(e){return bl.call(Yi,e)?!0:bl.call(Ki,e)?!1:cf.test(e)?Yi[e]=!0:(Ki[e]=!0,!1)}function df(e,t,n,r){if(n!==null&&n.type===0)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return r?!1:n!==null?!n.acceptsBooleans:(e=e.toLowerCase().slice(0,5),e!=="data-"&&e!=="aria-");default:return!1}}function pf(e,t,n,r){if(t===null||typeof t>"u"||df(e,t,n,r))return!0;if(r)return!1;if(n!==null)switch(n.type){case 3:return!t;case 4:return t===!1;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}function me(e,t,n,r,l,o,i){this.acceptsBooleans=t===2||t===3||t===4,this.attributeName=r,this.attributeNamespace=l,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=o,this.removeEmptyString=i}var ie={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){ie[e]=new me(e,0,!1,e,null,!1,!1)});[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];ie[t]=new me(t,1,!1,e[1],null,!1,!1)});["contentEditable","draggable","spellCheck","value"].forEach(function(e){ie[e]=new me(e,2,!1,e.toLowerCase(),null,!1,!1)});["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){ie[e]=new me(e,2,!1,e,null,!1,!1)});"allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture disableRemotePlayback formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){ie[e]=new me(e,3,!1,e.toLowerCase(),null,!1,!1)});["checked","multiple","muted","selected"].forEach(function(e){ie[e]=new me(e,3,!0,e,null,!1,!1)});["capture","download"].forEach(function(e){ie[e]=new me(e,4,!1,e,null,!1,!1)});["cols","rows","size","span"].forEach(function(e){ie[e]=new me(e,6,!1,e,null,!1,!1)});["rowSpan","start"].forEach(function(e){ie[e]=new me(e,5,!1,e.toLowerCase(),null,!1,!1)});var Zo=/[\-:]([a-z])/g;function qo(e){return e[1].toUpperCase()}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color 
stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(Zo,qo);ie[t]=new me(t,1,!1,e,null,!1,!1)});"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(Zo,qo);ie[t]=new me(t,1,!1,e,"http://www.w3.org/1999/xlink",!1,!1)});["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(Zo,qo);ie[t]=new me(t,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)});["tabIndex","crossOrigin"].forEach(function(e){ie[e]=new me(e,1,!1,e.toLowerCase(),null,!1,!1)});ie.xlinkHref=new me("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1);["src","href","action","formAction"].forEach(function(e){ie[e]=new me(e,1,!1,e.toLowerCase(),null,!0,!0)});function Jo(e,t,n,r){var l=ie.hasOwnProperty(t)?ie[t]:null;(l!==null?l.type!==0:r||!(2u||l[i]!==o[u]){var s=` -`+l[i].replace(" at new "," at ");return e.displayName&&s.includes("")&&(s=s.replace("",e.displayName)),s}while(1<=i&&0<=u);break}}}finally{Tl=!1,Error.prepareStackTrace=n}return(e=e?e.displayName||e.name:"")?jn(e):""}function mf(e){switch(e.tag){case 5:return jn(e.type);case 16:return jn("Lazy");case 13:return jn("Suspense");case 19:return jn("SuspenseList");case 0:case 2:case 15:return e=Ol(e.type,!1),e;case 11:return e=Ol(e.type.render,!1),e;case 1:return e=Ol(e.type,!0),e;default:return""}}function ro(e){if(e==null)return null;if(typeof e=="function")return e.displayName||e.name||null;if(typeof e=="string")return e;switch(e){case $t:return"Fragment";case Dt:return"Portal";case eo:return"Profiler";case bo:return"StrictMode";case to:return"Suspense";case no:return"SuspenseList"}if(typeof e=="object")switch(e.$$typeof){case hs:return(e.displayName||"Context")+".Consumer";case ms:return(e._context.displayName||"Context")+".Provider";case ei:var t=e.render;return e=e.displayName,e||(e=t.displayName||t.name||"",e=e!==""?"ForwardRef("+e+")":"ForwardRef"),e;case ti:return t=e.displayName||null,t!==null?t:ro(e.type)||"Memo";case nt:t=e._payload,e=e._init;try{return ro(e(t))}catch{}}return null}function hf(e){var t=e.type;switch(e.tag){case 24:return"Cache";case 9:return(t.displayName||"Context")+".Consumer";case 10:return(t._context.displayName||"Context")+".Provider";case 18:return"DehydratedFragment";case 11:return e=t.render,e=e.displayName||e.name||"",t.displayName||(e!==""?"ForwardRef("+e+")":"ForwardRef");case 7:return"Fragment";case 5:return t;case 4:return"Portal";case 3:return"Root";case 6:return"Text";case 16:return ro(t);case 8:return t===bo?"StrictMode":"Mode";case 22:return"Offscreen";case 12:return"Profiler";case 21:return"Scope";case 13:return"Suspense";case 19:return"SuspenseList";case 25:return"TracingMarker";case 1:case 0:case 17:case 2:case 14:case 15:if(typeof t=="function")return t.displayName||t.name||null;if(typeof t=="string")return t}return null}function yt(e){switch(typeof e){case"boolean":case"number":case"string":case"undefined":return e;case"object":return e;default:return""}}function vs(e){var t=e.type;return(e=e.nodeName)&&e.toLowerCase()==="input"&&(t==="checkbox"||t==="radio")}function yf(e){var 
t=vs(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&typeof n<"u"&&typeof n.get=="function"&&typeof n.set=="function"){var l=n.get,o=n.set;return Object.defineProperty(e,t,{configurable:!0,get:function(){return l.call(this)},set:function(i){r=""+i,o.call(this,i)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return r},setValue:function(i){r=""+i},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}function cr(e){e._valueTracker||(e._valueTracker=yf(e))}function gs(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=vs(e)?e.checked?"true":"false":e.value),e=r,e!==n?(t.setValue(e),!0):!1}function Mr(e){if(e=e||(typeof document<"u"?document:void 0),typeof e>"u")return null;try{return e.activeElement||e.body}catch{return e.body}}function lo(e,t){var n=t.checked;return W({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:n??e._wrapperState.initialChecked})}function Gi(e,t){var n=t.defaultValue==null?"":t.defaultValue,r=t.checked!=null?t.checked:t.defaultChecked;n=yt(t.value!=null?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:t.type==="checkbox"||t.type==="radio"?t.checked!=null:t.value!=null}}function ws(e,t){t=t.checked,t!=null&&Jo(e,"checked",t,!1)}function oo(e,t){ws(e,t);var n=yt(t.value),r=t.type;if(n!=null)r==="number"?(n===0&&e.value===""||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if(r==="submit"||r==="reset"){e.removeAttribute("value");return}t.hasOwnProperty("value")?io(e,t.type,n):t.hasOwnProperty("defaultValue")&&io(e,t.type,yt(t.defaultValue)),t.checked==null&&t.defaultChecked!=null&&(e.defaultChecked=!!t.defaultChecked)}function Zi(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!(r!=="submit"&&r!=="reset"||t.value!==void 0&&t.value!==null))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}n=e.name,n!==""&&(e.name=""),e.defaultChecked=!!e._wrapperState.initialChecked,n!==""&&(e.name=n)}function io(e,t,n){(t!=="number"||Mr(e.ownerDocument)!==e)&&(n==null?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}var _n=Array.isArray;function Zt(e,t,n,r){if(e=e.options,t){t={};for(var l=0;l"+t.valueOf().toString()+"",t=fr.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}});function $n(e,t){if(t){var n=e.firstChild;if(n&&n===e.lastChild&&n.nodeType===3){n.nodeValue=t;return}}e.textContent=t}var On={animationIterationCount:!0,aspectRatio:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},vf=["Webkit","ms","Moz","O"];Object.keys(On).forEach(function(e){vf.forEach(function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),On[t]=On[e]})});function Es(e,t,n){return t==null||typeof t=="boolean"||t===""?"":n||typeof 
t!="number"||t===0||On.hasOwnProperty(e)&&On[e]?(""+t).trim():t+"px"}function Cs(e,t){e=e.style;for(var n in t)if(t.hasOwnProperty(n)){var r=n.indexOf("--")===0,l=Es(n,t[n],r);n==="float"&&(n="cssFloat"),r?e.setProperty(n,l):e[n]=l}}var gf=W({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function ao(e,t){if(t){if(gf[e]&&(t.children!=null||t.dangerouslySetInnerHTML!=null))throw Error(x(137,e));if(t.dangerouslySetInnerHTML!=null){if(t.children!=null)throw Error(x(60));if(typeof t.dangerouslySetInnerHTML!="object"||!("__html"in t.dangerouslySetInnerHTML))throw Error(x(61))}if(t.style!=null&&typeof t.style!="object")throw Error(x(62))}}function co(e,t){if(e.indexOf("-")===-1)return typeof t.is=="string";switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}var fo=null;function ni(e){return e=e.target||e.srcElement||window,e.correspondingUseElement&&(e=e.correspondingUseElement),e.nodeType===3?e.parentNode:e}var po=null,qt=null,Jt=null;function bi(e){if(e=lr(e)){if(typeof po!="function")throw Error(x(280));var t=e.stateNode;t&&(t=dl(t),po(e.stateNode,e.type,t))}}function js(e){qt?Jt?Jt.push(e):Jt=[e]:qt=e}function _s(){if(qt){var e=qt,t=Jt;if(Jt=qt=null,bi(e),t)for(e=0;e>>=0,e===0?32:31-(Of(e)/Pf|0)|0}var dr=64,pr=4194304;function Nn(e){switch(e&-e){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return e&4194240;case 4194304:case 8388608:case 16777216:case 33554432:case 67108864:return e&130023424;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 1073741824;default:return e}}function Vr(e,t){var n=e.pendingLanes;if(n===0)return 0;var r=0,l=e.suspendedLanes,o=e.pingedLanes,i=n&268435455;if(i!==0){var u=i&~l;u!==0?r=Nn(u):(o&=i,o!==0&&(r=Nn(o)))}else i=n&~l,i!==0?r=Nn(i):o!==0&&(r=Nn(o));if(r===0)return 0;if(t!==0&&t!==r&&!(t&l)&&(l=r&-r,o=t&-t,l>=o||l===16&&(o&4194240)!==0))return t;if(r&4&&(r|=n&16),t=e.entangledLanes,t!==0)for(e=e.entanglements,t&=r;0n;n++)t.push(e);return t}function nr(e,t,n){e.pendingLanes|=t,t!==536870912&&(e.suspendedLanes=0,e.pingedLanes=0),e=e.eventTimes,t=31-Me(t),e[t]=n}function Ff(e,t){var n=e.pendingLanes&~t;e.pendingLanes=t,e.suspendedLanes=0,e.pingedLanes=0,e.expiredLanes&=t,e.mutableReadLanes&=t,e.entangledLanes&=t,t=e.entanglements;var r=e.eventTimes;for(e=e.expirationTimes;0=Ln),su=String.fromCharCode(32),au=!1;function Ks(e,t){switch(e){case"keyup":return sd.indexOf(t.keyCode)!==-1;case"keydown":return t.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function Ys(e){return e=e.detail,typeof e=="object"&&"data"in e?e.data:null}var Ut=!1;function cd(e,t){switch(e){case"compositionend":return Ys(t);case"keypress":return t.which!==32?null:(au=!0,su);case"textInput":return e=t.data,e===su&&au?null:e;default:return null}}function fd(e,t){if(Ut)return e==="compositionend"||!ci&&Ks(e,t)?(e=Hs(),Tr=ui=it=null,Ut=!1,e):null;switch(e){case"paste":return 
null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1=t)return{node:n,offset:t-e};e=r}e:{for(;n;){if(n.nextSibling){n=n.nextSibling;break e}n=n.parentNode}n=void 0}n=pu(n)}}function qs(e,t){return e&&t?e===t?!0:e&&e.nodeType===3?!1:t&&t.nodeType===3?qs(e,t.parentNode):"contains"in e?e.contains(t):e.compareDocumentPosition?!!(e.compareDocumentPosition(t)&16):!1:!1}function Js(){for(var e=window,t=Mr();t instanceof e.HTMLIFrameElement;){try{var n=typeof t.contentWindow.location.href=="string"}catch{n=!1}if(n)e=t.contentWindow;else break;t=Mr(e.document)}return t}function fi(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&(t==="input"&&(e.type==="text"||e.type==="search"||e.type==="tel"||e.type==="url"||e.type==="password")||t==="textarea"||e.contentEditable==="true")}function Sd(e){var t=Js(),n=e.focusedElem,r=e.selectionRange;if(t!==n&&n&&n.ownerDocument&&qs(n.ownerDocument.documentElement,n)){if(r!==null&&fi(n)){if(t=r.start,e=r.end,e===void 0&&(e=t),"selectionStart"in n)n.selectionStart=t,n.selectionEnd=Math.min(e,n.value.length);else if(e=(t=n.ownerDocument||document)&&t.defaultView||window,e.getSelection){e=e.getSelection();var l=n.textContent.length,o=Math.min(r.start,l);r=r.end===void 0?o:Math.min(r.end,l),!e.extend&&o>r&&(l=r,r=o,o=l),l=mu(n,o);var i=mu(n,r);l&&i&&(e.rangeCount!==1||e.anchorNode!==l.node||e.anchorOffset!==l.offset||e.focusNode!==i.node||e.focusOffset!==i.offset)&&(t=t.createRange(),t.setStart(l.node,l.offset),e.removeAllRanges(),o>r?(e.addRange(t),e.extend(i.node,i.offset)):(t.setEnd(i.node,i.offset),e.addRange(t)))}}for(t=[],e=n;e=e.parentNode;)e.nodeType===1&&t.push({element:e,left:e.scrollLeft,top:e.scrollTop});for(typeof n.focus=="function"&&n.focus(),n=0;n=document.documentMode,Vt=null,wo=null,In=null,So=!1;function hu(e,t,n){var r=n.window===n?n.document:n.nodeType===9?n:n.ownerDocument;So||Vt==null||Vt!==Mr(r)||(r=Vt,"selectionStart"in r&&fi(r)?r={start:r.selectionStart,end:r.selectionEnd}:(r=(r.ownerDocument&&r.ownerDocument.defaultView||window).getSelection(),r={anchorNode:r.anchorNode,anchorOffset:r.anchorOffset,focusNode:r.focusNode,focusOffset:r.focusOffset}),In&&Wn(In,r)||(In=r,r=Hr(wo,"onSelect"),0Ht||(e.current=_o[Ht],_o[Ht]=null,Ht--)}function D(e,t){Ht++,_o[Ht]=e.current,e.current=t}var vt={},ce=wt(vt),ve=wt(!1),Pt=vt;function rn(e,t){var n=e.type.contextTypes;if(!n)return vt;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var l={},o;for(o in n)l[o]=t[o];return r&&(e=e.stateNode,e.__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=l),l}function ge(e){return e=e.childContextTypes,e!=null}function Kr(){U(ve),U(ce)}function ku(e,t,n){if(ce.current!==vt)throw Error(x(168));D(ce,t),D(ve,n)}function ua(e,t,n){var r=e.stateNode;if(t=t.childContextTypes,typeof r.getChildContext!="function")return n;r=r.getChildContext();for(var l in r)if(!(l in t))throw Error(x(108,hf(e)||"Unknown",l));return W({},n,r)}function Yr(e){return e=(e=e.stateNode)&&e.__reactInternalMemoizedMergedChildContext||vt,Pt=ce.current,D(ce,e),D(ve,ve.current),!0}function Eu(e,t,n){var r=e.stateNode;if(!r)throw Error(x(169));n?(e=ua(e,t,Pt),r.__reactInternalMemoizedMergedChildContext=e,U(ve),U(ce),D(ce,e)):U(ve),D(ve,n)}var Ke=null,pl=!1,Ql=!1;function sa(e){Ke===null?Ke=[e]:Ke.push(e)}function zd(e){pl=!0,sa(e)}function St(){if(!Ql&&Ke!==null){Ql=!0;var e=0,t=A;try{var 
n=Ke;for(A=1;e>=i,l-=i,Ye=1<<32-Me(t)+l|n<T?(X=N,N=null):X=N.sibling;var F=v(m,N,y[T],S);if(F===null){N===null&&(N=X);break}e&&N&&F.alternate===null&&t(m,N),d=o(F,d,T),_===null?C=F:_.sibling=F,_=F,N=X}if(T===y.length)return n(m,N),V&&Et(m,T),C;if(N===null){for(;TT?(X=N,N=null):X=N.sibling;var ze=v(m,N,F.value,S);if(ze===null){N===null&&(N=X);break}e&&N&&ze.alternate===null&&t(m,N),d=o(ze,d,T),_===null?C=ze:_.sibling=ze,_=ze,N=X}if(F.done)return n(m,N),V&&Et(m,T),C;if(N===null){for(;!F.done;T++,F=y.next())F=c(m,F.value,S),F!==null&&(d=o(F,d,T),_===null?C=F:_.sibling=F,_=F);return V&&Et(m,T),C}for(N=r(m,N);!F.done;T++,F=y.next())F=g(N,m,T,F.value,S),F!==null&&(e&&F.alternate!==null&&N.delete(F.key===null?T:F.key),d=o(F,d,T),_===null?C=F:_.sibling=F,_=F);return e&&N.forEach(function(mn){return t(m,mn)}),V&&Et(m,T),C}function M(m,d,y,S){if(typeof y=="object"&&y!==null&&y.type===$t&&y.key===null&&(y=y.props.children),typeof y=="object"&&y!==null){switch(y.$$typeof){case ar:e:{for(var C=y.key,_=d;_!==null;){if(_.key===C){if(C=y.type,C===$t){if(_.tag===7){n(m,_.sibling),d=l(_,y.props.children),d.return=m,m=d;break e}}else if(_.elementType===C||typeof C=="object"&&C!==null&&C.$$typeof===nt&&Pu(C)===_.type){n(m,_.sibling),d=l(_,y.props),d.ref=kn(m,_,y),d.return=m,m=d;break e}n(m,_);break}else t(m,_);_=_.sibling}y.type===$t?(d=Ot(y.props.children,m.mode,S,y.key),d.return=m,m=d):(S=Ar(y.type,y.key,y.props,null,m.mode,S),S.ref=kn(m,d,y),S.return=m,m=S)}return i(m);case Dt:e:{for(_=y.key;d!==null;){if(d.key===_)if(d.tag===4&&d.stateNode.containerInfo===y.containerInfo&&d.stateNode.implementation===y.implementation){n(m,d.sibling),d=l(d,y.children||[]),d.return=m,m=d;break e}else{n(m,d);break}else t(m,d);d=d.sibling}d=ql(y,m.mode,S),d.return=m,m=d}return i(m);case nt:return _=y._init,M(m,d,_(y._payload),S)}if(_n(y))return w(m,d,y,S);if(vn(y))return k(m,d,y,S);Sr(m,y)}return typeof y=="string"&&y!==""||typeof y=="number"?(y=""+y,d!==null&&d.tag===6?(n(m,d.sibling),d=l(d,y),d.return=m,m=d):(n(m,d),d=Zl(y,m.mode,S),d.return=m,m=d),i(m)):n(m,d)}return M}var on=ya(!0),va=ya(!1),or={},He=wt(or),Gn=wt(or),Zn=wt(or);function Nt(e){if(e===or)throw Error(x(174));return e}function Si(e,t){switch(D(Zn,t),D(Gn,e),D(He,or),e=t.nodeType,e){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:so(null,"");break;default:e=e===8?t.parentNode:t,t=e.namespaceURI||null,e=e.tagName,t=so(t,e)}U(He),D(He,t)}function un(){U(He),U(Gn),U(Zn)}function ga(e){Nt(Zn.current);var t=Nt(He.current),n=so(t,e.type);t!==n&&(D(Gn,e),D(He,n))}function xi(e){Gn.current===e&&(U(He),U(Gn))}var Q=wt(0);function br(e){for(var t=e;t!==null;){if(t.tag===13){var n=t.memoizedState;if(n!==null&&(n=n.dehydrated,n===null||n.data==="$?"||n.data==="$!"))return t}else if(t.tag===19&&t.memoizedProps.revealOrder!==void 0){if(t.flags&128)return t}else if(t.child!==null){t.child.return=t,t=t.child;continue}if(t===e)break;for(;t.sibling===null;){if(t.return===null||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}return null}var Hl=[];function ki(){for(var e=0;en?n:4,e(!0);var r=Wl.transition;Wl.transition={};try{e(!1),t()}finally{A=n,Wl.transition=r}}function Fa(){return Le().memoizedState}function Ad(e,t,n){var r=mt(e);if(n={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null},Ra(e))Aa(t,n);else if(n=da(e,t,n,r),n!==null){var l=de();De(n,e,r,l),Ma(n,t,r)}}function Md(e,t,n){var r=mt(e),l={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null};if(Ra(e))Aa(t,l);else{var 
o=e.alternate;if(e.lanes===0&&(o===null||o.lanes===0)&&(o=t.lastRenderedReducer,o!==null))try{var i=t.lastRenderedState,u=o(i,n);if(l.hasEagerState=!0,l.eagerState=u,$e(u,i)){var s=t.interleaved;s===null?(l.next=l,gi(t)):(l.next=s.next,s.next=l),t.interleaved=l;return}}catch{}finally{}n=da(e,t,l,r),n!==null&&(l=de(),De(n,e,r,l),Ma(n,t,r))}}function Ra(e){var t=e.alternate;return e===H||t!==null&&t===H}function Aa(e,t){Fn=el=!0;var n=e.pending;n===null?t.next=t:(t.next=n.next,n.next=t),e.pending=t}function Ma(e,t,n){if(n&4194240){var r=t.lanes;r&=e.pendingLanes,n|=r,t.lanes=n,li(e,n)}}var tl={readContext:Pe,useCallback:ue,useContext:ue,useEffect:ue,useImperativeHandle:ue,useInsertionEffect:ue,useLayoutEffect:ue,useMemo:ue,useReducer:ue,useRef:ue,useState:ue,useDebugValue:ue,useDeferredValue:ue,useTransition:ue,useMutableSource:ue,useSyncExternalStore:ue,useId:ue,unstable_isNewReconciler:!1},Dd={readContext:Pe,useCallback:function(e,t){return Ve().memoizedState=[e,t===void 0?null:t],e},useContext:Pe,useEffect:zu,useImperativeHandle:function(e,t,n){return n=n!=null?n.concat([e]):null,zr(4194308,4,Oa.bind(null,t,e),n)},useLayoutEffect:function(e,t){return zr(4194308,4,e,t)},useInsertionEffect:function(e,t){return zr(4,2,e,t)},useMemo:function(e,t){var n=Ve();return t=t===void 0?null:t,e=e(),n.memoizedState=[e,t],e},useReducer:function(e,t,n){var r=Ve();return t=n!==void 0?n(t):t,r.memoizedState=r.baseState=t,e={pending:null,interleaved:null,lanes:0,dispatch:null,lastRenderedReducer:e,lastRenderedState:t},r.queue=e,e=e.dispatch=Ad.bind(null,H,e),[r.memoizedState,e]},useRef:function(e){var t=Ve();return e={current:e},t.memoizedState=e},useState:Lu,useDebugValue:Ni,useDeferredValue:function(e){return Ve().memoizedState=e},useTransition:function(){var e=Lu(!1),t=e[0];return e=Rd.bind(null,e[1]),Ve().memoizedState=e,[t,e]},useMutableSource:function(){},useSyncExternalStore:function(e,t,n){var r=H,l=Ve();if(V){if(n===void 0)throw Error(x(407));n=n()}else{if(n=t(),re===null)throw Error(x(349));zt&30||xa(r,t,n)}l.memoizedState=n;var o={value:n,getSnapshot:t};return l.queue=o,zu(Ea.bind(null,r,o,e),[e]),r.flags|=2048,bn(9,ka.bind(null,r,o,n,t),void 0,null),n},useId:function(){var e=Ve(),t=re.identifierPrefix;if(V){var n=Xe,r=Ye;n=(r&~(1<<32-Me(r)-1)).toString(32)+n,t=":"+t+"R"+n,n=qn++,0<\/script>",e=e.removeChild(e.firstChild)):typeof r.is=="string"?e=i.createElement(n,{is:r.is}):(e=i.createElement(n),n==="select"&&(i=e,r.multiple?i.multiple=!0:r.size&&(i.size=r.size))):e=i.createElementNS(e,n),e[Be]=t,e[Xn]=r,Ka(e,t,!1,!1),t.stateNode=e;e:{switch(i=co(n,r),n){case"dialog":$("cancel",e),$("close",e),l=r;break;case"iframe":case"object":case"embed":$("load",e),l=r;break;case"video":case"audio":for(l=0;lan&&(t.flags|=128,r=!0,En(o,!1),t.lanes=4194304)}else{if(!r)if(e=br(i),e!==null){if(t.flags|=128,r=!0,n=e.updateQueue,n!==null&&(t.updateQueue=n,t.flags|=4),En(o,!0),o.tail===null&&o.tailMode==="hidden"&&!i.alternate&&!V)return se(t),null}else 2*Z()-o.renderingStartTime>an&&n!==1073741824&&(t.flags|=128,r=!0,En(o,!1),t.lanes=4194304);o.isBackwards?(i.sibling=t.child,t.child=i):(n=o.last,n!==null?n.sibling=i:t.child=i,o.last=i)}return o.tail!==null?(t=o.tail,o.rendering=t,o.tail=t.sibling,o.renderingStartTime=Z(),t.sibling=null,n=Q.current,D(Q,r?n&1|2:n&1),t):(se(t),null);case 22:case 23:return Ii(),r=t.memoizedState!==null,e!==null&&e.memoizedState!==null!==r&&(t.flags|=8192),r&&t.mode&1?Se&1073741824&&(se(t),t.subtreeFlags&6&&(t.flags|=8192)):se(t),null;case 24:return null;case 25:return null}throw 
Error(x(156,t.tag))}function Kd(e,t){switch(pi(t),t.tag){case 1:return ge(t.type)&&Kr(),e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 3:return un(),U(ve),U(ce),ki(),e=t.flags,e&65536&&!(e&128)?(t.flags=e&-65537|128,t):null;case 5:return xi(t),null;case 13:if(U(Q),e=t.memoizedState,e!==null&&e.dehydrated!==null){if(t.alternate===null)throw Error(x(340));ln()}return e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 19:return U(Q),null;case 4:return un(),null;case 10:return vi(t.type._context),null;case 22:case 23:return Ii(),null;case 24:return null;default:return null}}var kr=!1,ae=!1,Yd=typeof WeakSet=="function"?WeakSet:Set,E=null;function Xt(e,t){var n=e.ref;if(n!==null)if(typeof n=="function")try{n(null)}catch(r){K(e,t,r)}else n.current=null}function Do(e,t,n){try{n()}catch(r){K(e,t,r)}}var Vu=!1;function Xd(e,t){if(xo=Br,e=Js(),fi(e)){if("selectionStart"in e)var n={start:e.selectionStart,end:e.selectionEnd};else e:{n=(n=e.ownerDocument)&&n.defaultView||window;var r=n.getSelection&&n.getSelection();if(r&&r.rangeCount!==0){n=r.anchorNode;var l=r.anchorOffset,o=r.focusNode;r=r.focusOffset;try{n.nodeType,o.nodeType}catch{n=null;break e}var i=0,u=-1,s=-1,f=0,h=0,c=e,v=null;t:for(;;){for(var g;c!==n||l!==0&&c.nodeType!==3||(u=i+l),c!==o||r!==0&&c.nodeType!==3||(s=i+r),c.nodeType===3&&(i+=c.nodeValue.length),(g=c.firstChild)!==null;)v=c,c=g;for(;;){if(c===e)break t;if(v===n&&++f===l&&(u=i),v===o&&++h===r&&(s=i),(g=c.nextSibling)!==null)break;c=v,v=c.parentNode}c=g}n=u===-1||s===-1?null:{start:u,end:s}}else n=null}n=n||{start:0,end:0}}else n=null;for(ko={focusedElem:e,selectionRange:n},Br=!1,E=t;E!==null;)if(t=E,e=t.child,(t.subtreeFlags&1028)!==0&&e!==null)e.return=t,E=e;else for(;E!==null;){t=E;try{var w=t.alternate;if(t.flags&1024)switch(t.tag){case 0:case 11:case 15:break;case 1:if(w!==null){var k=w.memoizedProps,M=w.memoizedState,m=t.stateNode,d=m.getSnapshotBeforeUpdate(t.elementType===t.type?k:Fe(t.type,k),M);m.__reactInternalSnapshotBeforeUpdate=d}break;case 3:var y=t.stateNode.containerInfo;y.nodeType===1?y.textContent="":y.nodeType===9&&y.documentElement&&y.removeChild(y.documentElement);break;case 5:case 6:case 4:case 17:break;default:throw Error(x(163))}}catch(S){K(t,t.return,S)}if(e=t.sibling,e!==null){e.return=t.return,E=e;break}E=t.return}return w=Vu,Vu=!1,w}function Rn(e,t,n){var r=t.updateQueue;if(r=r!==null?r.lastEffect:null,r!==null){var l=r=r.next;do{if((l.tag&e)===e){var o=l.destroy;l.destroy=void 0,o!==void 0&&Do(t,n,o)}l=l.next}while(l!==r)}}function yl(e,t){if(t=t.updateQueue,t=t!==null?t.lastEffect:null,t!==null){var n=t=t.next;do{if((n.tag&e)===e){var r=n.create;n.destroy=r()}n=n.next}while(n!==t)}}function $o(e){var t=e.ref;if(t!==null){var n=e.stateNode;switch(e.tag){case 5:e=n;break;default:e=n}typeof t=="function"?t(e):t.current=e}}function Ga(e){var t=e.alternate;t!==null&&(e.alternate=null,Ga(t)),e.child=null,e.deletions=null,e.sibling=null,e.tag===5&&(t=e.stateNode,t!==null&&(delete t[Be],delete t[Xn],delete t[jo],delete t[Pd],delete t[Ld])),e.stateNode=null,e.return=null,e.dependencies=null,e.memoizedProps=null,e.memoizedState=null,e.pendingProps=null,e.stateNode=null,e.updateQueue=null}function Za(e){return e.tag===5||e.tag===3||e.tag===4}function Bu(e){e:for(;;){for(;e.sibling===null;){if(e.return===null||Za(e.return))return null;e=e.return}for(e.sibling.return=e.return,e=e.sibling;e.tag!==5&&e.tag!==6&&e.tag!==18;){if(e.flags&2||e.child===null||e.tag===4)continue e;e.child.return=e,e=e.child}if(!(e.flags&2))return e.stateNode}}function 
Uo(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.nodeType===8?n.parentNode.insertBefore(e,t):n.insertBefore(e,t):(n.nodeType===8?(t=n.parentNode,t.insertBefore(e,n)):(t=n,t.appendChild(e)),n=n._reactRootContainer,n!=null||t.onclick!==null||(t.onclick=Wr));else if(r!==4&&(e=e.child,e!==null))for(Uo(e,t,n),e=e.sibling;e!==null;)Uo(e,t,n),e=e.sibling}function Vo(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.insertBefore(e,t):n.appendChild(e);else if(r!==4&&(e=e.child,e!==null))for(Vo(e,t,n),e=e.sibling;e!==null;)Vo(e,t,n),e=e.sibling}var le=null,Re=!1;function tt(e,t,n){for(n=n.child;n!==null;)qa(e,t,n),n=n.sibling}function qa(e,t,n){if(Qe&&typeof Qe.onCommitFiberUnmount=="function")try{Qe.onCommitFiberUnmount(sl,n)}catch{}switch(n.tag){case 5:ae||Xt(n,t);case 6:var r=le,l=Re;le=null,tt(e,t,n),le=r,Re=l,le!==null&&(Re?(e=le,n=n.stateNode,e.nodeType===8?e.parentNode.removeChild(n):e.removeChild(n)):le.removeChild(n.stateNode));break;case 18:le!==null&&(Re?(e=le,n=n.stateNode,e.nodeType===8?Bl(e.parentNode,n):e.nodeType===1&&Bl(e,n),Qn(e)):Bl(le,n.stateNode));break;case 4:r=le,l=Re,le=n.stateNode.containerInfo,Re=!0,tt(e,t,n),le=r,Re=l;break;case 0:case 11:case 14:case 15:if(!ae&&(r=n.updateQueue,r!==null&&(r=r.lastEffect,r!==null))){l=r=r.next;do{var o=l,i=o.destroy;o=o.tag,i!==void 0&&(o&2||o&4)&&Do(n,t,i),l=l.next}while(l!==r)}tt(e,t,n);break;case 1:if(!ae&&(Xt(n,t),r=n.stateNode,typeof r.componentWillUnmount=="function"))try{r.props=n.memoizedProps,r.state=n.memoizedState,r.componentWillUnmount()}catch(u){K(n,t,u)}tt(e,t,n);break;case 21:tt(e,t,n);break;case 22:n.mode&1?(ae=(r=ae)||n.memoizedState!==null,tt(e,t,n),ae=r):tt(e,t,n);break;default:tt(e,t,n)}}function Qu(e){var t=e.updateQueue;if(t!==null){e.updateQueue=null;var n=e.stateNode;n===null&&(n=e.stateNode=new Yd),t.forEach(function(r){var l=rp.bind(null,e,r);n.has(r)||(n.add(r),r.then(l,l))})}}function Ie(e,t){var n=t.deletions;if(n!==null)for(var r=0;rl&&(l=i),r&=~o}if(r=l,r=Z()-r,r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*Zd(r/1960))-r,10e?16:e,ut===null)var r=!1;else{if(e=ut,ut=null,ll=0,R&6)throw Error(x(331));var l=R;for(R|=4,E=e.current;E!==null;){var o=E,i=o.child;if(E.flags&16){var u=o.deletions;if(u!==null){for(var s=0;sZ()-Li?Tt(e,0):Pi|=n),we(e,t)}function oc(e,t){t===0&&(e.mode&1?(t=pr,pr<<=1,!(pr&130023424)&&(pr=4194304)):t=1);var n=de();e=Je(e,t),e!==null&&(nr(e,t,n),we(e,n))}function np(e){var t=e.memoizedState,n=0;t!==null&&(n=t.retryLane),oc(e,n)}function rp(e,t){var n=0;switch(e.tag){case 13:var r=e.stateNode,l=e.memoizedState;l!==null&&(n=l.retryLane);break;case 19:r=e.stateNode;break;default:throw Error(x(314))}r!==null&&r.delete(t),oc(e,n)}var ic;ic=function(e,t,n){if(e!==null)if(e.memoizedProps!==t.pendingProps||ve.current)ye=!0;else{if(!(e.lanes&n)&&!(t.flags&128))return ye=!1,Hd(e,t,n);ye=!!(e.flags&131072)}else ye=!1,V&&t.flags&1048576&&aa(t,Gr,t.index);switch(t.lanes=0,t.tag){case 2:var r=t.type;Ir(e,t),e=t.pendingProps;var l=rn(t,ce.current);en(t,n),l=Ci(null,t,r,e,l,n);var o=ji();return t.flags|=1,typeof l=="object"&&l!==null&&typeof l.render=="function"&&l.$$typeof===void 0?(t.tag=1,t.memoizedState=null,t.updateQueue=null,ge(r)?(o=!0,Yr(t)):o=!1,t.memoizedState=l.state!==null&&l.state!==void 0?l.state:null,wi(t),l.updater=ml,t.stateNode=l,l._reactInternals=t,Lo(t,r,e,n),t=Fo(null,t,r,!0,o,n)):(t.tag=0,V&&o&&di(t),fe(null,t,l,n),t=t.child),t;case 
16:r=t.elementType;e:{switch(Ir(e,t),e=t.pendingProps,l=r._init,r=l(r._payload),t.type=r,l=t.tag=op(r),e=Fe(r,e),l){case 0:t=Io(null,t,r,e,n);break e;case 1:t=Du(null,t,r,e,n);break e;case 11:t=Au(null,t,r,e,n);break e;case 14:t=Mu(null,t,r,Fe(r.type,e),n);break e}throw Error(x(306,r,""))}return t;case 0:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Fe(r,l),Io(e,t,r,l,n);case 1:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Fe(r,l),Du(e,t,r,l,n);case 3:e:{if(Qa(t),e===null)throw Error(x(387));r=t.pendingProps,o=t.memoizedState,l=o.element,pa(e,t),Jr(t,r,null,n);var i=t.memoizedState;if(r=i.element,o.isDehydrated)if(o={element:r,isDehydrated:!1,cache:i.cache,pendingSuspenseBoundaries:i.pendingSuspenseBoundaries,transitions:i.transitions},t.updateQueue.baseState=o,t.memoizedState=o,t.flags&256){l=sn(Error(x(423)),t),t=$u(e,t,r,n,l);break e}else if(r!==l){l=sn(Error(x(424)),t),t=$u(e,t,r,n,l);break e}else for(xe=ft(t.stateNode.containerInfo.firstChild),ke=t,V=!0,Ae=null,n=va(t,null,r,n),t.child=n;n;)n.flags=n.flags&-3|4096,n=n.sibling;else{if(ln(),r===l){t=be(e,t,n);break e}fe(e,t,r,n)}t=t.child}return t;case 5:return ga(t),e===null&&To(t),r=t.type,l=t.pendingProps,o=e!==null?e.memoizedProps:null,i=l.children,Eo(r,l)?i=null:o!==null&&Eo(r,o)&&(t.flags|=32),Ba(e,t),fe(e,t,i,n),t.child;case 6:return e===null&&To(t),null;case 13:return Ha(e,t,n);case 4:return Si(t,t.stateNode.containerInfo),r=t.pendingProps,e===null?t.child=on(t,null,r,n):fe(e,t,r,n),t.child;case 11:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Fe(r,l),Au(e,t,r,l,n);case 7:return fe(e,t,t.pendingProps,n),t.child;case 8:return fe(e,t,t.pendingProps.children,n),t.child;case 12:return fe(e,t,t.pendingProps.children,n),t.child;case 10:e:{if(r=t.type._context,l=t.pendingProps,o=t.memoizedProps,i=l.value,D(Zr,r._currentValue),r._currentValue=i,o!==null)if($e(o.value,i)){if(o.children===l.children&&!ve.current){t=be(e,t,n);break e}}else for(o=t.child,o!==null&&(o.return=t);o!==null;){var u=o.dependencies;if(u!==null){i=o.child;for(var s=u.firstContext;s!==null;){if(s.context===r){if(o.tag===1){s=Ge(-1,n&-n),s.tag=2;var f=o.updateQueue;if(f!==null){f=f.shared;var h=f.pending;h===null?s.next=s:(s.next=h.next,h.next=s),f.pending=s}}o.lanes|=n,s=o.alternate,s!==null&&(s.lanes|=n),Oo(o.return,n,t),u.lanes|=n;break}s=s.next}}else if(o.tag===10)i=o.type===t.type?null:o.child;else if(o.tag===18){if(i=o.return,i===null)throw Error(x(341));i.lanes|=n,u=i.alternate,u!==null&&(u.lanes|=n),Oo(i,n,t),i=o.sibling}else i=o.child;if(i!==null)i.return=o;else for(i=o;i!==null;){if(i===t){i=null;break}if(o=i.sibling,o!==null){o.return=i.return,i=o;break}i=i.return}o=i}fe(e,t,l.children,n),t=t.child}return t;case 9:return l=t.type,r=t.pendingProps.children,en(t,n),l=Pe(l),r=r(l),t.flags|=1,fe(e,t,r,n),t.child;case 14:return r=t.type,l=Fe(r,t.pendingProps),l=Fe(r.type,l),Mu(e,t,r,l,n);case 15:return Ua(e,t,t.type,t.pendingProps,n);case 17:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Fe(r,l),Ir(e,t),t.tag=1,ge(r)?(e=!0,Yr(t)):e=!1,en(t,n),ha(t,r,l),Lo(t,r,l,n),Fo(null,t,r,!0,e,n);case 19:return Wa(e,t,n);case 22:return Va(e,t,n)}throw Error(x(156,t.tag))};function uc(e,t){return Is(e,t)}function 
lp(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=t,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function Te(e,t,n,r){return new lp(e,t,n,r)}function Ri(e){return e=e.prototype,!(!e||!e.isReactComponent)}function op(e){if(typeof e=="function")return Ri(e)?1:0;if(e!=null){if(e=e.$$typeof,e===ei)return 11;if(e===ti)return 14}return 2}function ht(e,t){var n=e.alternate;return n===null?(n=Te(e.tag,t,e.key,e.mode),n.elementType=e.elementType,n.type=e.type,n.stateNode=e.stateNode,n.alternate=e,e.alternate=n):(n.pendingProps=t,n.type=e.type,n.flags=0,n.subtreeFlags=0,n.deletions=null),n.flags=e.flags&14680064,n.childLanes=e.childLanes,n.lanes=e.lanes,n.child=e.child,n.memoizedProps=e.memoizedProps,n.memoizedState=e.memoizedState,n.updateQueue=e.updateQueue,t=e.dependencies,n.dependencies=t===null?null:{lanes:t.lanes,firstContext:t.firstContext},n.sibling=e.sibling,n.index=e.index,n.ref=e.ref,n}function Ar(e,t,n,r,l,o){var i=2;if(r=e,typeof e=="function")Ri(e)&&(i=1);else if(typeof e=="string")i=5;else e:switch(e){case $t:return Ot(n.children,l,o,t);case bo:i=8,l|=8;break;case eo:return e=Te(12,n,t,l|2),e.elementType=eo,e.lanes=o,e;case to:return e=Te(13,n,t,l),e.elementType=to,e.lanes=o,e;case no:return e=Te(19,n,t,l),e.elementType=no,e.lanes=o,e;case ys:return gl(n,l,o,t);default:if(typeof e=="object"&&e!==null)switch(e.$$typeof){case ms:i=10;break e;case hs:i=9;break e;case ei:i=11;break e;case ti:i=14;break e;case nt:i=16,r=null;break e}throw Error(x(130,e==null?e:typeof e,""))}return t=Te(i,n,t,l),t.elementType=e,t.type=r,t.lanes=o,t}function Ot(e,t,n,r){return e=Te(7,e,r,t),e.lanes=n,e}function gl(e,t,n,r){return e=Te(22,e,r,t),e.elementType=ys,e.lanes=n,e.stateNode={isHidden:!1},e}function Zl(e,t,n){return e=Te(6,e,null,t),e.lanes=n,e}function ql(e,t,n){return t=Te(4,e.children!==null?e.children:[],e.key,t),t.lanes=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function ip(e,t,n,r,l){this.tag=t,this.containerInfo=e,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=Ll(0),this.expirationTimes=Ll(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=Ll(0),this.identifierPrefix=r,this.onRecoverableError=l,this.mutableSourceEagerHydrationData=null}function Ai(e,t,n,r,l,o,i,u,s){return e=new ip(e,t,n,u,s),t===1?(t=1,o===!0&&(t|=8)):t=0,o=Te(3,null,null,t),e.current=o,o.stateNode=e,o.memoizedState={element:r,isDehydrated:n,cache:null,transitions:null,pendingSuspenseBoundaries:null},wi(o),e}function up(e,t,n){var r=3"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(fc)}catch(e){console.error(e)}}fc(),as.exports=Ce;var dp=as.exports,dc,qu=dp;dc=qu.createRoot,qu.hydrateRoot;var pp=(typeof process<"u","https://huggingface.co");async function mp(e,t){var r;const n=new hp(e.url,e.status,e.headers.get("X-Request-Id")??(t==null?void 0:t.requestId));if(n.message=`Api error with status ${n.statusCode}.${t!=null&&t.message?` ${t.message}.`:""} Request ID: ${n.requestId}, url: 
${n.url}`,(r=e.headers.get("Content-Type"))!=null&&r.startsWith("application/json")){const l=await e.json();n.message=l.error||l.message||n.message,n.data=l}else n.data={message:await e.text()};throw n}var hp=class extends Error{constructor(t,n,r,l){super(l);yn(this,"statusCode");yn(this,"url");yn(this,"requestId");yn(this,"data");this.statusCode=n,this.requestId=r,this.url=t}};function yp(e){if(!(!e||e.accessToken===void 0||e.accessToken===null)&&!e.accessToken.startsWith("hf_"))throw new TypeError("Your access token must start with 'hf_'")}function vp(e){const t=/<(https?:[/][/][^>]+)>;\s+rel="([^"]+)"/g;return Object.fromEntries([...e.matchAll(t)].map(([,n,r])=>[r,n]))}var gp=["pipeline_tag","private","gated","downloads","likes"];async function*wp(e){var r,l;yp(e==null?void 0:e.credentials);const t=new URLSearchParams([...Object.entries({limit:"500",...(r=e==null?void 0:e.search)!=null&&r.owner?{author:e.search.owner}:void 0,...(l=e==null?void 0:e.search)!=null&&l.task?{pipeline_tag:e.search.task}:void 0}),...gp.map(o=>["expand",o])]).toString();let n=`${(e==null?void 0:e.hubUrl)||pp}/api/models?${t}`;for(;n;){const o=await fetch(n,{headers:{accept:"application/json",...e!=null&&e.credentials?{Authorization:`Bearer ${e.credentials.accessToken}`}:void 0}});if(!o.ok)throw mp(o);const i=await o.json();for(const s of i)yield{id:s._id,name:s.id,private:s.private,task:s.pipeline_tag,downloads:s.downloads,gated:s.gated,likes:s.likes,updatedAt:new Date(s.lastModified)};const u=o.headers.get("Link");n=u?vp(u).next:void 0}}var Sp=Object.defineProperty,xp=(e,t)=>{for(var n in t)Sp(e,n,{get:t[n],enumerable:!0})},kp={};xp(kp,{audioClassification:()=>mc,automaticSpeechRecognition:()=>hc,conversational:()=>kc,documentQuestionAnswering:()=>Rc,featureExtraction:()=>Ec,fillMask:()=>Cc,imageClassification:()=>vc,imageSegmentation:()=>gc,imageToText:()=>wc,objectDetection:()=>Sc,questionAnswering:()=>jc,request:()=>B,sentenceSimilarity:()=>_c,streamingRequest:()=>Ui,summarization:()=>Nc,tableQuestionAnswering:()=>Tc,textClassification:()=>Oc,textGeneration:()=>Pc,textGenerationStream:()=>Np,textToImage:()=>xc,textToSpeech:()=>yc,tokenClassification:()=>Lc,translation:()=>zc,visualQuestionAnswering:()=>Ac,zeroShotClassification:()=>Ic});var Ep="https://api-inference.huggingface.co/models/";function pc(e,t){const{model:n,accessToken:r,...l}=e,o={};r&&(o.Authorization=`Bearer ${r}`);const i="data"in e&&!!e.data;i?(t!=null&&t.wait_for_model&&(o["X-Wait-For-Model"]="true"),(t==null?void 0:t.use_cache)===!1&&(o["X-Use-Cache"]="false"),t!=null&&t.dont_load_model&&(o["X-Load-Model"]="0")):o["Content-Type"]="application/json";const u=/^http(s?):/.test(n)||n.startsWith("/")?n:`${Ep}${n}`,s={headers:o,method:"POST",body:i?e.data:JSON.stringify({...l,options:t}),credentials:t!=null&&t.includeCredentials?"include":"same-origin"};return{url:u,info:s}}async function B(e,t){var o,i;const{url:n,info:r}=pc(e,t),l=await((t==null?void 0:t.fetch)??fetch)(n,r);if((t==null?void 0:t.retry_on_error)!==!1&&l.status===503&&!(t!=null&&t.wait_for_model))return B(e,{...t,wait_for_model:!0});if(!l.ok){if((o=l.headers.get("Content-Type"))!=null&&o.startsWith("application/json")){const u=await l.json();if(u.error)throw new Error(u.error)}throw new Error("An error occurred while fetching the blob")}return(i=l.headers.get("Content-Type"))!=null&&i.startsWith("application/json")?await l.json():await l.blob()}function Cp(e){let t,n,r,l=!1;return function(i){t===void 0?(t=i,n=0,r=-1):t=_p(t,i);const u=t.length;let s=0;for(;n0){const 
s=l.decode(i.subarray(0,u)),f=u+(i[u+1]===32?2:1),h=l.decode(i.subarray(f));switch(s){case"data":r.data=r.data?r.data+` -`+h:h;break;case"event":r.event=h;break;case"id":e(r.id=h);break;case"retry":const c=parseInt(h,10);isNaN(c)||t(r.retry=c);break}}}}function _p(e,t){const n=new Uint8Array(e.length+t.length);return n.set(e),n.set(t,e.length),n}function Ju(){return{data:"",event:"",id:"",retry:void 0}}async function*Ui(e,t){var f;const{url:n,info:r}=pc({...e,stream:!0},t),l=await((t==null?void 0:t.fetch)??fetch)(n,r);if((t==null?void 0:t.retry_on_error)!==!1&&l.status===503&&!(t!=null&&t.wait_for_model))return Ui(e,{...t,wait_for_model:!0});if(!l.ok){if((f=l.headers.get("Content-Type"))!=null&&f.startsWith("application/json")){const h=await l.json();if(h.error)throw new Error(h.error)}throw new Error(`Server response contains error: ${l.status}`)}if(l.headers.get("content-type")!=="text/event-stream")throw new Error("Server does not support event stream content type, it returned "+l.headers.get("content-type"));if(!l.body)return;const o=l.body.getReader();let i=[];const s=Cp(jp(()=>{},()=>{},h=>{i.push(h)}));try{for(;;){const{done:h,value:c}=await o.read();if(h)return;s(c);for(const v of i)if(v.data.length>0){const g=JSON.parse(v.data);if(typeof g=="object"&&g!==null&&"error"in g)throw new Error(g.error);yield g}i=[]}}finally{o.releaseLock()}}var Y=class extends TypeError{constructor(e){super(`Invalid inference output: ${e}. Use the 'request' method with the same parameters to do a custom call with no type checking.`),this.name="InferenceOutputError"}};async function mc(e,t){const n=await B(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number")))throw new Y("Expected Array<{label: string, score: number}>");return n}async function hc(e,t){const n=await B(e,t);if(!(typeof(n==null?void 0:n.text)=="string"))throw new Y("Expected {text: string}");return n}async function yc(e,t){const n=await B(e,t);if(!(n&&n instanceof Blob))throw new Y("Expected Blob");return n}async function vc(e,t){const n=await B(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number")))throw new Y("Expected Array<{label: string, score: number}>");return n}async function gc(e,t){const n=await B(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.mask=="string"&&typeof l.score=="number")))throw new Y("Expected Array<{label: string, mask: string, score: number}>");return n}async function wc(e,t){var r;const n=(r=await B(e,t))==null?void 0:r[0];if(typeof(n==null?void 0:n.generated_text)!="string")throw new Y("Expected {generated_text: string}");return n}async function Sc(e,t){const n=await B(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number"&&typeof l.box.xmin=="number"&&typeof l.box.ymin=="number"&&typeof l.box.xmax=="number"&&typeof l.box.ymax=="number")))throw new Y("Expected Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>");return n}async function xc(e,t){const n=await B(e,t);if(!(n&&n instanceof Blob))throw new Y("Expected Blob");return n}async function kc(e,t){const n=await B(e,t);if(!(Array.isArray(n.conversation.generated_responses)&&n.conversation.generated_responses.every(l=>typeof l=="string")&&Array.isArray(n.conversation.past_user_inputs)&&n.conversation.past_user_inputs.every(l=>typeof l=="string")&&typeof n.generated_text=="string"&&Array.isArray(n.warnings)&&n.warnings.every(l=>typeof l=="string")))throw new Y("Expected {conversation: 
{generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}");return n}async function Ec(e,t){const n=await B(e,t);let r=!0;if(Array.isArray(n)){for(const l of n)if(Array.isArray(l)){if(r=l.every(o=>typeof o=="number"),!r)break}else if(typeof l!="number"){r=!1;break}}else r=!1;if(!r)throw new Y("Expected Array");return n}async function Cc(e,t){const n=await B(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.score=="number"&&typeof l.sequence=="string"&&typeof l.token=="number"&&typeof l.token_str=="string")))throw new Y("Expected Array<{score: number, sequence: string, token: number, token_str: string}>");return n}async function jc(e,t){const n=await B(e,t);if(!(typeof n=="object"&&!!n&&typeof n.answer=="string"&&typeof n.end=="number"&&typeof n.score=="number"&&typeof n.start=="number"))throw new Y("Expected {answer: string, end: number, score: number, start: number}");return n}async function _c(e,t){const n=await B(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l=="number")))throw new Y("Expected number[]");return n}async function Nc(e,t){const n=await B(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.summary_text)=="string")))throw new Y("Expected Array<{summary_text: string}>");return n==null?void 0:n[0]}async function Tc(e,t){const n=await B(e,t);if(!(typeof(n==null?void 0:n.aggregator)=="string"&&typeof n.answer=="string"&&Array.isArray(n.cells)&&n.cells.every(l=>typeof l=="string")&&Array.isArray(n.coordinates)&&n.coordinates.every(l=>Array.isArray(l)&&l.every(o=>typeof o=="number"))))throw new Y("Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}");return n}async function Oc(e,t){var l;const n=(l=await B(e,t))==null?void 0:l[0];if(!(Array.isArray(n)&&n.every(o=>typeof(o==null?void 0:o.label)=="string"&&typeof o.score=="number")))throw new Y("Expected Array<{label: string, score: number}>");return n}async function Pc(e,t){const n=await B(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.generated_text)=="string")))throw new Y("Expected Array<{generated_text: string}>");return n==null?void 0:n[0]}async function*Np(e,t){yield*Ui(e,t)}function Vi(e){return Array.isArray(e)?e:[e]}async function Lc(e,t){const n=Vi(await B(e,t));if(!(Array.isArray(n)&&n.every(l=>typeof l.end=="number"&&typeof l.entity_group=="string"&&typeof l.score=="number"&&typeof l.start=="number"&&typeof l.word=="string")))throw new Y("Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>");return n}async function zc(e,t){const n=await B(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.translation_text)=="string")))throw new Y("Expected type Array<{translation_text: string}>");return n==null?void 0:n[0]}async function Ic(e,t){const n=Vi(await B(e,t));if(!(Array.isArray(n)&&n.every(l=>Array.isArray(l.labels)&&l.labels.every(o=>typeof o=="string")&&Array.isArray(l.scores)&&l.scores.every(o=>typeof o=="number")&&typeof l.sequence=="string")))throw new Y("Expected Array<{labels: string[], scores: number[], sequence: string}>");return n}function Fc(e){if(globalThis.Buffer)return globalThis.Buffer.from(e).toString("base64");{const t=[];return e.forEach(n=>{t.push(String.fromCharCode(n))}),globalThis.btoa(t.join(""))}}async function Rc(e,t){var o;const n={...e,inputs:{question:e.inputs.question,image:Fc(new Uint8Array(await e.inputs.image.arrayBuffer()))}},r=(o=Vi(await B(n,t)))==null?void 0:o[0];if(!(typeof(r==null?void 0:r.answer)=="string"&&(typeof 
r.end=="number"||typeof r.end>"u")&&(typeof r.score=="number"||typeof r.score>"u")&&(typeof r.start=="number"||typeof r.start>"u")))throw new Y("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");return r}async function Ac(e,t){var o;const n={...e,inputs:{question:e.inputs.question,image:Fc(new Uint8Array(await e.inputs.image.arrayBuffer()))}},r=(o=await B(n,t))==null?void 0:o[0];if(!(typeof(r==null?void 0:r.answer)=="string"&&typeof r.score=="number"))throw new Y("Expected Array<{answer: string, score: number}>");return r}const O=e=>a.jsx("button",{className:`${e.variant==="secondary"?"border-4 border-yellow-200":"bg-yellow-200"} py-6 text-center w-full ${e.disabled?"cursor-not-allowed opacity-50":""}`,disabled:e.disabled??!1,onClick:e.onClick,children:e.label??"Submit"}),Mc=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Input"}),e.input?a.jsx("audio",{className:"w-full",controls:!0,src:URL.createObjectURL(e.input)}):a.jsxs("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",a.jsx("input",{accept:"audio/*",className:"hidden",onChange:t=>{t.target.files&&t.target.files[0]&&e.setInput(t.target.files[0])},type:"file"})]})]}),P=e=>{const t=(()=>{try{return JSON.stringify(e.output,void 0,2)}catch(n){if(n instanceof Error)return`Error during JSON.stringify: ${n.message}`}})();return a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Output"}),a.jsx("pre",{className:`bg-yellow-200 break-words p-6 select-text w-full whitespace-pre-wrap ${e.disabled?"cursor-wait opacity-50":""}`,children:t})]})},Tp="audio-classification",Op=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await mc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(Mc,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},Pp="automatic-speech-recognition",Lp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await hc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(Mc,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},J=e=>{const t=p.useRef(null);return p.useLayoutEffect(()=>{t.current&&(t.current.style.height="inherit",t.current.style.height=`${t.current.scrollHeight}px`)},[e.input]),a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Input"}),a.jsx("textarea",{className:"bg-yellow-200 py-6 resize-none text-center 
w-full",disabled:e.disabled??!1,onChange:n=>{!e.disabled&&e.setInput&&(n.target.value?e.setInput(n.target.value):e.setInput(""))},ref:t,rows:1,style:{height:t.current?`${t.current.scrollHeight}px`:"inherit"},value:e.input??""})]})},zp="conversational",Ip=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=()=>{t&&(l(!0),s(c=>c?{...c,conversation:{...c.conversation,past_user_inputs:[...c.conversation.past_user_inputs,t]}}:{conversation:{generated_responses:[],past_user_inputs:[t]},generated_text:"",warnings:[]}),n(void 0),kc({inputs:{generated_responses:u==null?void 0:u.conversation.generated_responses,past_user_inputs:u==null?void 0:u.conversation.past_user_inputs,text:t},model:e.model}).then(s).catch(i).finally(()=>l(!1)))};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t&&!u,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?Array.from({length:Math.max(u.conversation.generated_responses.length,u.conversation.past_user_inputs.length)}).map((c,v,g)=>a.jsxs(p.Fragment,{children:[u.conversation.generated_responses[g.length-v-1]?a.jsx(P,{disabled:r,label:`Output - Generated Response #${g.length-v}`,output:u.conversation.generated_responses[g.length-v-1]}):a.jsx(p.Fragment,{}),u.conversation.past_user_inputs[g.length-v-1]?a.jsx(J,{disabled:!0,label:`Output - Past User Input #${g.length-v}`,input:u.conversation.past_user_inputs[g.length-v-1]}):a.jsx(p.Fragment,{})]},v)):a.jsx(p.Fragment,{})]})},pn=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Input"}),e.input?a.jsx("img",{className:"w-full",src:URL.createObjectURL(e.input)}):a.jsxs("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",a.jsx("input",{accept:"image/*",className:"hidden",onChange:t=>{t.target.files&&t.target.files[0]&&e.setInput(t.target.files[0])},type:"file"})]})]}),Fp="document-question-answering",Rp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(),[o,i]=p.useState(!1),[u,s]=p.useState(),[f,h]=p.useState(),c=()=>{n(void 0),l(void 0),s(void 0),h(void 0)},v=async()=>{if(t&&r){i(!0);try{const g=await Rc({inputs:{question:t,image:r},model:e.model});h(g)}catch(g){g instanceof Error&&s(g)}finally{i(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Question",setInput:n}),a.jsx(pn,{input:r,label:"Input - Image",setInput:l}),a.jsx(O,{label:"Clear",disabled:o||!r,onClick:c,variant:"secondary"}),a.jsx(O,{disabled:o||!r,onClick:v}),!o&&u?a.jsx(P,{disabled:o,label:"Error",output:u}):a.jsx(p.Fragment,{}),!u&&f?a.jsx(P,{disabled:o,output:f}):a.jsx(p.Fragment,{})]})},Ap="feature-extraction",Mp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await Ec({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},Dp="fill-mask",$p=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 
0)},h=async()=>{if(t){l(!0);try{const c=await Cc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.token_str)):a.jsx(p.Fragment,{})]})},Up="image-classification",Vp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await vc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(pn,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},Bp="image-segmentation",Qp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await gc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(pn,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},Hp="image-to-text",Wp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await wc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(pn,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},Kp="object-detection",Yp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await Sc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(pn,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},Xp="question-answering",Gp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(),[o,i]=p.useState(!1),[u,s]=p.useState(),[f,h]=p.useState(),c=()=>{n(void 0),l(void 0),s(void 0),h(void 0)},v=async()=>{if(t&&r){i(!0);try{const g=await jc({inputs:{question:t,context:r},model:e.model});h(g)}catch(g){g instanceof Error&&s(g)}finally{i(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Question",setInput:n}),a.jsx(J,{input:r,label:"Input - 
Context",setInput:l}),a.jsx(O,{label:"Clear",disabled:o||!t||!r,onClick:c,variant:"secondary"}),a.jsx(O,{disabled:o||!t||!r,onClick:v}),!o&&u?a.jsx(P,{disabled:o,label:"Error",output:u}):a.jsx(p.Fragment,{}),!u&&f?a.jsx(P,{disabled:o,output:f}):a.jsx(p.Fragment,{})]})},Zp="sentence-similarity",qp=e=>{const[t,n]=p.useState(),r=Array.from({length:2}).map(()=>{}),[l,o]=p.useState(r),[i,u]=p.useState(!1),[s,f]=p.useState(),[h,c]=p.useState(),v=()=>{n(void 0),o(r),f(void 0),c(void 0)},g=async()=>{if(t&&l.every(Boolean)){u(!0);try{const w=await _c({inputs:{source_sentence:t,sentences:l},model:e.model});c(w)}catch(w){w instanceof Error&&f(w)}finally{u(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Source Sentence",setInput:n}),l.map((w,k)=>a.jsx(J,{input:w,label:`Input - Sentence #${k+1}`,setInput:M=>o(m=>[...m.slice(0,k),M,...m.slice(k+1,m.length)])})),a.jsx(O,{disabled:i||!t||!l.every(Boolean),label:"Add Sentence",onClick:()=>o(w=>[...w,void 0])}),a.jsx(O,{disabled:i||!t||!l.every(Boolean),label:"Clear",onClick:v,variant:"secondary"}),a.jsx(O,{disabled:i||!t||!l.every(Boolean),onClick:g}),!i&&s?a.jsx(P,{disabled:i,label:"Error",output:s}):a.jsx(p.Fragment,{}),!s&&h?h.map((w,k)=>a.jsx(P,{disabled:i,label:`Output - Sentence #${k+1}`,output:w})):a.jsx(p.Fragment,{})]})},Jp="summarization",bp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await Nc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},em=async e=>{const t=await e.text();try{const n=JSON.parse(t);try{return JSON.stringify(n,void 0,2)}catch(r){if(r instanceof Error)return`Error during JSON.stringify: ${r.message}`}}catch(n){if(n instanceof Error)return`Error during JSON.parse: ${n.message}`}},tm=e=>{const[t,n]=p.useState();return p.useEffect(()=>{e.input&&em(e.input).then(n)},[e.input]),a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Input"}),e.input?a.jsx("pre",{className:"bg-yellow-200 break-words p-6 select-text w-full whitespace-pre-wrap",children:t}):a.jsxs("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",a.jsx("input",{accept:".json",className:"hidden",onChange:r=>{r.target.files&&r.target.files[0]&&e.setInput(r.target.files[0])},type:"file"})]})]})},nm="table-question-answering",rm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(),[o,i]=p.useState(!1),[u,s]=p.useState(),[f,h]=p.useState(),c=()=>{n(void 0),l(void 0),s(void 0),h(void 0)},v=async()=>{if(t&&r){i(!0);try{const g=await Tc({inputs:{query:t,table:JSON.parse(await r.text()??"{}")},model:e.model});h(g)}catch(g){g instanceof Error&&s(g)}finally{i(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Query",setInput:n}),a.jsx(tm,{input:r,label:"Input - 
Table",setInput:l}),a.jsx(O,{label:"Clear",disabled:o||!t,onClick:c,variant:"secondary"}),a.jsx(O,{disabled:o||!t,onClick:v}),!o&&u?a.jsx(P,{disabled:o,label:"Error",output:u}):a.jsx(p.Fragment,{}),!u&&f?a.jsx(P,{disabled:o,output:f}):a.jsx(p.Fragment,{})]})},lm="text-classification",om=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await Oc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},im="text-generation",um=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await Pc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},sm=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Output"}),a.jsx("img",{className:`w-full ${e.disabled?"cursor-wait opacity-50":""}`,src:URL.createObjectURL(e.output)})]}),am="text-to-image",cm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await xc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(sm,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},fm=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Output"}),a.jsx("audio",{className:`w-full ${e.disabled?"cursor-wait opacity-50":""}`,controls:!0,src:URL.createObjectURL(e.output)})]}),dm="text-to-speech",pm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await yc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(fm,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},mm="token-classification",hm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await Lc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return 
a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.word)):a.jsx(p.Fragment,{})]})},ym="translation",vm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),f=()=>{n(void 0),i(void 0),s(void 0)},h=async()=>{if(t){l(!0);try{const c=await zc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:f,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:h}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},gm="visual-question-answering",wm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(),[o,i]=p.useState(!1),[u,s]=p.useState(),[f,h]=p.useState(),c=()=>{n(void 0),l(void 0),s(void 0),h(void 0)},v=async()=>{if(t&&r){i(!0);try{const g=await Ac({inputs:{question:t,image:r},model:e.model});h(g)}catch(g){g instanceof Error&&s(g)}finally{i(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Question",setInput:n}),a.jsx(pn,{input:r,label:"Input - Image",setInput:l}),a.jsx(O,{label:"Clear",disabled:o||!r,onClick:c,variant:"secondary"}),a.jsx(O,{disabled:o||!r,onClick:v}),!o&&u?a.jsx(P,{disabled:o,label:"Error",output:u}):a.jsx(p.Fragment,{}),!u&&f?a.jsx(P,{disabled:o,output:f}):a.jsx(p.Fragment,{})]})},Sm="zero-shot-classification",xm=e=>{const[t,n]=p.useState(),r=Array.from({length:2}).map(()=>{}),[l,o]=p.useState(r),[i,u]=p.useState(!1),[s,f]=p.useState(),[h,c]=p.useState(),v=()=>{n(void 0),o(r),f(void 0),c(void 0)},g=async()=>{if(t&&l.every(Boolean)){u(!0);try{const w=await Ic({inputs:t,model:e.model,parameters:{candidate_labels:l}});c(w)}catch(w){w instanceof Error&&f(w)}finally{u(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),l.map((w,k)=>a.jsx(J,{input:w,label:`Parameter - Candidate Label #${k+1}`,setInput:M=>o(m=>[...m.slice(0,k),M,...m.slice(k+1,m.length)])})),a.jsx(O,{disabled:i||!t||!l.every(Boolean),label:"Add Candidate Label",onClick:()=>o(w=>[...w,void 0])}),a.jsx(O,{disabled:i||!t||!l.every(Boolean),label:"Clear",onClick:v,variant:"secondary"}),a.jsx(O,{disabled:i||!t||!l.every(Boolean),onClick:g}),!i&&s?a.jsx(P,{disabled:i,label:"Error",output:s}):a.jsx(p.Fragment,{}),!s&&h?h.map((w,k)=>a.jsx(P,{disabled:i,output:w})):a.jsx(p.Fragment,{})]})},km=[Tp,Pp,zp,Fp,Ap,Dp,Up,Bp,Hp,Kp,Xp,Zp,Jp,nm,lm,im,am,dm,mm,ym,gm,Sm],Em=e=>{if(!e.model||!e.task)return a.jsx(p.Fragment,{});switch(e.task){case"audio-classification":return a.jsx(Op,{model:e.model});case"automatic-speech-recognition":return a.jsx(Lp,{model:e.model});case"conversational":return a.jsx(Ip,{model:e.model});case"document-question-answering":return a.jsx(Rp,{model:e.model});case"feature-extraction":return a.jsx(Mp,{model:e.model});case"fill-mask":return a.jsx($p,{model:e.model});case"image-classification":return a.jsx(Vp,{model:e.model});case"image-segmentation":return a.jsx(Qp,{model:e.model});case"image-to-text":return a.jsx(Wp,{model:e.model});case"object-detection":return a.jsx(Yp,{model:e.model});case"question-answering":return a.jsx(Gp,{model:e.model});case"sentence-similarity":return a.jsx(qp,{model:e.model});case"summarization":return a.jsx(bp,{model:e.model});case"table-question-answering":return 
a.jsx(rm,{model:e.model});case"text-classification":return a.jsx(om,{model:e.model});case"text-generation":return a.jsx(um,{model:e.model});case"text-to-image":return a.jsx(cm,{model:e.model});case"text-to-speech":return a.jsx(pm,{model:e.model});case"token-classification":return a.jsx(hm,{model:e.model});case"translation":return a.jsx(vm,{model:e.model});case"visual-question-answering":return a.jsx(wm,{model:e.model});case"zero-shot-classification":return a.jsx(xm,{model:e.model});default:return a.jsx(p.Fragment,{})}},Cm=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:"Task"}),a.jsxs("select",{className:"bg-yellow-200 cursor-pointer py-6 text-center w-full",onChange:t=>e.onTaskSelect(t.target.value),placeholder:"Select a task",value:e.task,children:[a.jsx("option",{children:"Select a task"}),km.map(t=>a.jsx("option",{value:t,children:t},t))]})]}),Jl={},jm=async e=>{if(Jl[e])return Jl[e];const t=[];for await(const n of wp({search:{task:e}}))t.push(n);return t.sort((n,r)=>n.downloads>r.downloads?-1:n.downloadsr.likes?-1:n.likesr.name?-1:n.name{const[t,n]=p.useState(!1),[r,l]=p.useState([]);return p.useEffect(()=>{l([]),e.task&&(n(!0),jm(e.task).then(o=>l(o)).finally(()=>n(!1)))},[e.task]),r.length>0?a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:"Model"}),a.jsxs("select",{className:"bg-yellow-200 cursor-pointer py-6 text-center w-full",onChange:o=>e.onModelSelect(o.target.value),placeholder:"Select a model",value:e.model,children:[a.jsx("option",{children:"Select a model"}),r.map(o=>a.jsx("option",{value:o.name,children:o.name},o.name))]}),e.model?a.jsx("div",{className:"font-bold py-6 text-center text-yellow-200",children:a.jsx("a",{href:`https://huggingface.co/${e.model}`,rel:"noopener noferrer",target:"_blank",children:"View model on 🤗"})}):a.jsx(p.Fragment,{})]}):a.jsx("p",{className:"text-center w-full",children:e.task?t?"Loading models for this task":"No models available for this task":"Select a task to view available models"})},Nm=()=>{const[e,t]=p.useState(),[n,r]=p.useState(),l=o=>{r(void 0),t(o)};return a.jsx("div",{className:"bg-yellow-500 flex flex-col h-full items-center min-h-screen min-w-screen overflow-auto w-full",children:a.jsxs("div",{className:"flex flex-col items-center justify-center py-24 space-y-12 w-2/3 lg:w-1/3",children:[a.jsx("header",{className:"text-center text-6xl",children:"🤗"}),a.jsx(Cm,{onTaskSelect:l,task:e}),a.jsx(_m,{model:n,onModelSelect:r,task:e}),a.jsx(Em,{model:n,task:e})]})})};const Tm=()=>{const e="root",t=document.getElementById(e);if(t){const n=dc(t),r=a.jsx(p.StrictMode,{children:a.jsx(Nm,{})});n.render(r)}};Tm(); diff --git a/spaces/almino/WhisperYoutube/app.py b/spaces/almino/WhisperYoutube/app.py deleted file mode 100644 index 22ba2794134faa8248bd504a3bfd7e6b9f8c0d42..0000000000000000000000000000000000000000 --- a/spaces/almino/WhisperYoutube/app.py +++ /dev/null @@ -1,35 +0,0 @@ -from pytube import YouTube -from transformers import pipeline -import gradio as gr -import os - -pipe = pipeline(model="almino/checkpoints") - -def get_audio(url): - yt = YouTube(url) - video = yt.streams.filter(only_audio=True).first() - out_file=video.download(output_path=".") - base, ext = os.path.splitext(out_file) - new_file = base+'.mp3' - os.rename(out_file, new_file) - a = new_file - return a - -def get_text(url): - result = pipe(get_audio(url))['text'] - return result - - - -with gr.Blocks() as demo: - gr.Markdown("

      Youtube video transcription with OpenAI's Whisper

      ") - gr.Markdown("
      Enter the link of any youtube video to get the transcription of the video.
      ") - with gr.Tab('Get the transcription of any Youtube video'): - with gr.Row(): - input_text_1 = gr.Textbox(placeholder='Enter the Youtube video URL', label='URL') - output_text_1 = gr.Textbox(placeholder='Transcription of the video', label='Transcription') - result_button_1 = gr.Button('Get Transcription') - - - result_button_1.click(get_text, inputs = input_text_1, outputs = output_text_1) -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/almostagi/QTL/app.py b/spaces/almostagi/QTL/app.py deleted file mode 100644 index e6c651a8209775a0e34757b657c297052d1194db..0000000000000000000000000000000000000000 --- a/spaces/almostagi/QTL/app.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch -import torchvision -import gradio as gr -import torch.nn as nn -import pennylane as qml -import matplotlib.pyplot as plt - -from pennylane import numpy as np -from torchvision import transforms - -qubits = 4 -batch_size = 8 -depth = 6 -delta = 0.01 - -is_cuda_available = torch.cuda.is_available() -device = torch.device("cuda:0" if is_cuda_available else "cpu") - -if is_cuda_available: - print ("CUDA is available, selected:", device) -else: - print ("CUDA not available, selected:", device) - -dev = qml.device("default.qubit", wires=qubits) - -def H_layer(nqubits): - for idx in range(nqubits): - qml.Hadamard(wires=idx) - -def RY_layer(w): - for idx, element in enumerate(w): - qml.RY(element, wires=idx) - -def entangling_layer(nqubits): - for i in range(0, nqubits - 1, 2): - qml.CNOT(wires=[i, i + 1]) - for i in range(1, nqubits - 1, 2): - qml.CNOT(wires=[i, i + 1]) - -@qml.qnode(dev, interface="torch") -def quantum_net(q_input_features, q_weights_flat): - q_weights = q_weights_flat.reshape(depth, qubits) - H_layer(qubits) - RY_layer(q_input_features) - - for k in range(depth): - entangling_layer(qubits) - RY_layer(q_weights[k]) - - exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(qubits)] - return tuple(exp_vals) - -class QuantumNet(nn.Module): - def __init__(self): - super().__init__() - self.pre_net = nn.Linear(512, qubits) - self.q_params = nn.Parameter(delta * torch.randn(depth * qubits)) - self.post_net = nn.Linear(qubits, 2) - - def forward(self, input_features): - pre_out = self.pre_net(input_features) - q_in = torch.tanh(pre_out) * np.pi / 2.0 - q_out = torch.Tensor(0, qubits) - q_out = q_out.to(device) - for elem in q_in: - q_out_elem = quantum_net(elem, self.q_params).float().unsqueeze(0) - q_out = torch.cat((q_out, q_out_elem)) - return self.post_net(q_out) - -def classify(image): - mhModel = torch.load("QKTCC_simPennylane-26032022174332.pth", map_location=device) - mMModel = torchvision.models.resnet18(pretrained=True) - for param in mMModel.parameters(): - param.requires_grad = False - mMModel.fc = QuantumNet() - mMModel = mMModel.to(device) - qModel = mMModel - qModel.load_state_dict(mhModel) - - from PIL import Image - - data_transforms = transforms.Compose([ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), - ]) - - PIL_img = image - img = data_transforms(PIL_img) - img_input = img.unsqueeze(0) - - qModel.eval() - with torch.no_grad(): - outputs = qModel(img_input) - base_labels = (("mask", outputs[0, 0]), ("no_mask", outputs[0, 1])) - expvals, preds = torch.max(outputs, 1) - expvals_min, preds_min = torch.min(outputs, 1) - if expvals == base_labels[0][1]: - labels = base_labels[0][0] - else: - labels = base_labels[1][0] - outp = "Classified with 
output: " + labels + ", Tensor: " + str(expvals) + " (" + str(expvals_min) + ")" - return outp - -out = gr.outputs.Label(label='Result: ',type='auto') -iface = gr.Interface(classify, gr.inputs.Image(type="pil"), outputs=out, - title="Quantum Layered TL RN-18 Face Mask Detector", - description="🤗 This proof-of-concept quantum machine learning model takes a face image input and detects a face that has a mask or no mask: ", theme="default") - -iface.launch(debug=True) \ No newline at end of file diff --git a/spaces/alsalemi/pv-segment-01/utils.py b/spaces/alsalemi/pv-segment-01/utils.py deleted file mode 100644 index f73915580f7c70c64ce8bc26e73d18ef72f88e86..0000000000000000000000000000000000000000 --- a/spaces/alsalemi/pv-segment-01/utils.py +++ /dev/null @@ -1,282 +0,0 @@ -import datetime -import errno -import os -import time -from collections import defaultdict, deque - -import torch -import torch.distributed as dist - - -class SmoothedValue: - """Track a series of values and provide access to smoothed values over a - window or the global series average. - """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! - """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - return self.total / self.count - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value - ) - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - world_size = get_world_size() - if world_size == 1: - return [data] - data_list = [None] * world_size - dist.all_gather_object(data_list, data) - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that all processes - have the averaged results. Returns a dict with the same fields as - input_dict, after reduction. 
- """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.inference_mode(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.all_reduce(values) - if average: - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict - - -class MetricLogger: - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'") - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append(f"{name}: {str(meter)}") - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None): - i = 0 - if not header: - header = "" - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt="{avg:.4f}") - data_time = SmoothedValue(fmt="{avg:.4f}") - space_fmt = ":" + str(len(str(len(iterable)))) + "d" - if torch.cuda.is_available(): - log_msg = self.delimiter.join( - [ - header, - "[{0" + space_fmt + "}/{1}]", - "eta: {eta}", - "{meters}", - "time: {time}", - "data: {data}", - "max mem: {memory:.0f}", - ] - ) - else: - log_msg = self.delimiter.join( - [header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"] - ) - MB = 1024.0 * 1024.0 - for obj in iterable: - data_time.update(time.time() - end) - yield obj - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print( - log_msg.format( - i, - len(iterable), - eta=eta_string, - meters=str(self), - time=str(iter_time), - data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB, - ) - ) - else: - print( - log_msg.format( - i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time) - ) - ) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print(f"{header} Total time: {total_time_str} ({total_time / len(iterable):.4f} s / it)") - - -def collate_fn(batch): - return tuple(zip(*batch)) - - -def mkdir(path): - try: - os.makedirs(path) - except OSError as e: - if e.errno != errno.EEXIST: - raise - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop("force", False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return 
True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - if "RANK" in os.environ and "WORLD_SIZE" in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ["WORLD_SIZE"]) - args.gpu = int(os.environ["LOCAL_RANK"]) - elif "SLURM_PROCID" in os.environ: - args.rank = int(os.environ["SLURM_PROCID"]) - args.gpu = args.rank % torch.cuda.device_count() - else: - print("Not using distributed mode") - args.distributed = False - return - - args.distributed = True - - torch.cuda.set_device(args.gpu) - args.dist_backend = "nccl" - print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True) - torch.distributed.init_process_group( - backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank - ) - torch.distributed.barrier() - setup_for_distributed(args.rank == 0) diff --git a/spaces/am4nsolanki/hateful-memes/app.py b/spaces/am4nsolanki/hateful-memes/app.py deleted file mode 100644 index 85c078183bb99f529926d64299f5032e0c9cda7d..0000000000000000000000000000000000000000 --- a/spaces/am4nsolanki/hateful-memes/app.py +++ /dev/null @@ -1,70 +0,0 @@ -import streamlit as st -import pandas as pd -import numpy as np -import pickle - -from utils import get_image_arrays, get_image_predictions, show_image - -st.title('Hateful Memes Classification') -image_path = './images/' -demo_data_file = 'demo_data.csv' -demo_data = pd.read_csv('demo_data.csv') -TFLITE_FILE_PATH = 'image_model.tflite' - -demo_data = demo_data.sample(1) -y_true = demo_data['label'] -image_id = demo_data['image_id'] -text = demo_data['text'] - -image_id_dict = dict(image_id).values() -image_id_string = list(image_id_dict)[0] -st.write('Meme:') -st.image(image_path+image_id_string) - -# Image Unimodel -image_array = get_image_arrays(image_id, image_path) -image_prediction = get_image_predictions(image_array, TFLITE_FILE_PATH) -y_pred_image = np.argmax(image_prediction, axis=1) -print('Image Prediction Probabilities:') -print(image_prediction) - -# TFIDF Model -model = 'tfidf_model.pickle' -vectorizer = 'tfidf_vectorizer.pickle' -tfidf_model = pickle.load(open(model, 'rb')) -tfidf_vectorizer = pickle.load(open(vectorizer, 'rb')) -transformed_text = tfidf_vectorizer.transform(text) -text_prediction = tfidf_model.predict_proba(transformed_text) -y_pred_text = np.argmax(text_prediction, axis=1) -print('Text Prediction Probabilities:') -print(text_prediction) - -# Ensemble Probabilities -ensemble_prediction = np.mean(np.array([image_prediction, text_prediction]), axis=0) -y_pred_ensemble = np.argmax(ensemble_prediction, axis=1) -print(ensemble_prediction) - -# StreamLit Display -st.write('Image Model Predictions:') -st.write(np.round(np.array(image_prediction), 4)) - -st.write('Text Model Predictions:') -st.write(np.round(np.array(text_prediction), 4)) - -st.write('Ensemble Model Predictions:') -st.write(np.round(np.array(ensemble_prediction), 4)) - -true_label = list(dict(y_true).values())[0] -predicted_label = y_pred_ensemble[0] - -if true_label == 0: - st.write('True Label: non-hateful') -if true_label == 1: - st.write('True Label: hateful') - -if predicted_label == 0: - st.write('Predicted Label: non-hateful') -if 
predicted_label == 1: - st.write('Predicted Label: hateful') - -st.button('Random Meme') diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_prime.c b/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_prime.c deleted file mode 100644 index e94331049009f6179b0f4656f79ecaf4a832ac1b..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_prime.c +++ /dev/null @@ -1,234 +0,0 @@ -/** @file patest_prime.c - @ingroup test_src - @brief Test stream priming mode. - @author Ross Bencina http://www.audiomulch.com/~rossb -*/ - -/* - * $Id$ - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include -#include "portaudio.h" -#include "pa_util.h" - -#define NUM_BEEPS (3) -#define SAMPLE_RATE (44100) -#define SAMPLE_PERIOD (1.0/44100.0) -#define FRAMES_PER_BUFFER (256) -#define BEEP_DURATION (400) -#define IDLE_DURATION (SAMPLE_RATE*2) /* 2 seconds */ -#define SLEEP_MSEC (50) - -#define STATE_BKG_IDLE (0) -#define STATE_BKG_BEEPING (1) - -typedef struct -{ - float leftPhase; - float rightPhase; - int state; - int beepCountdown; - int idleCountdown; -} -paTestData; - -static void InitializeTestData( paTestData *testData ) -{ - testData->leftPhase = 0; - testData->rightPhase = 0; - testData->state = STATE_BKG_BEEPING; - testData->beepCountdown = BEEP_DURATION; - testData->idleCountdown = IDLE_DURATION; -} - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). 
-*/ -static int patestCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo *timeInfo, - PaStreamCallbackFlags statusFlags, void *userData ) -{ - /* Cast data passed through stream to our structure. */ - paTestData *data = (paTestData*)userData; - float *out = (float*)outputBuffer; - unsigned int i; - int result = paContinue; - - /* suppress unused parameter warnings */ - (void) inputBuffer; - (void) timeInfo; - (void) statusFlags; - - for( i=0; istate ) - { - case STATE_BKG_IDLE: - *out++ = 0.0; /* left */ - *out++ = 0.0; /* right */ - --data->idleCountdown; - - if( data->idleCountdown <= 0 ) result = paComplete; - break; - - case STATE_BKG_BEEPING: - if( data->beepCountdown <= 0 ) - { - data->state = STATE_BKG_IDLE; - *out++ = 0.0; /* left */ - *out++ = 0.0; /* right */ - } - else - { - /* Play sawtooth wave. */ - *out++ = data->leftPhase; /* left */ - *out++ = data->rightPhase; /* right */ - /* Generate simple sawtooth phaser that ranges between -1.0 and 1.0. */ - data->leftPhase += 0.01f; - /* When signal reaches top, drop back down. */ - if( data->leftPhase >= 1.0f ) data->leftPhase -= 2.0f; - /* higher pitch so we can distinguish left and right. */ - data->rightPhase += 0.03f; - if( data->rightPhase >= 1.0f ) data->rightPhase -= 2.0f; - } - --data->beepCountdown; - break; - } - } - - return result; -} - -/*******************************************************************/ -static PaError DoTest( int flags ) -{ - PaStream *stream; - PaError err = paNoError; - paTestData data; - PaStreamParameters outputParameters; - - InitializeTestData( &data ); - - outputParameters.device = Pa_GetDefaultOutputDevice(); - if (outputParameters.device == paNoDevice) { - fprintf(stderr,"Error: No default output device.\n"); - goto error; - } - outputParameters.channelCount = 2; - outputParameters.hostApiSpecificStreamInfo = NULL; - outputParameters.sampleFormat = paFloat32; - outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultHighOutputLatency; - - /* Open an audio I/O stream. */ - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, /* frames per buffer */ - paClipOff | flags, /* we won't output out of range samples so don't bother clipping them */ - patestCallback, - &data ); - if( err != paNoError ) goto error; - - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - printf("hear \"BEEP\"\n" ); - fflush(stdout); - - while( ( err = Pa_IsStreamActive( stream ) ) == 1 ) Pa_Sleep(SLEEP_MSEC); - if( err < 0 ) goto error; - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - - return err; -error: - return err; -} - -/*******************************************************************/ -int main(void); -int main(void) -{ - PaError err = paNoError; - int i; - - /* Initialize library before making any other calls. 
*/ - err = Pa_Initialize(); - if( err != paNoError ) goto error; - - printf("PortAudio Test: Testing stream playback with no priming.\n"); - printf("PortAudio Test: you should see BEEP before you hear it.\n"); - printf("BEEP %d times.\n", NUM_BEEPS ); - - for( i=0; i< NUM_BEEPS; ++i ) - { - err = DoTest( 0 ); - if( err != paNoError ) - goto error; - } - - printf("PortAudio Test: Testing stream playback with priming.\n"); - printf("PortAudio Test: you should see BEEP around the same time you hear it.\n"); - for( i=0; i< NUM_BEEPS; ++i ) - { - err = DoTest( paPrimeOutputBuffersUsingStreamCallback ); - if( err != paNoError ) - goto error; - } - - printf("Test finished.\n"); - - Pa_Terminate(); - return err; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/amitjainmldesign/amitapp/app.py b/spaces/amitjainmldesign/amitapp/app.py deleted file mode 100644 index 30b9e7f3952e481bbb23ba844188b56ef63175c9..0000000000000000000000000000000000000000 --- a/spaces/amitjainmldesign/amitapp/app.py +++ /dev/null @@ -1,23 +0,0 @@ -import gradio as gr -from transformers import pipeline - -generator = pipeline('text-generation', model='gpt2') - -def generate(text): - result = generator(text, max_length=50, num_return_sequences=1) - return result[0]["generated_text"] - -examples = [ - ["The Moon's orbit around Earth has"], - ["The smooth Borealis basin in the Northern Hemisphere covers 40%"], - ["Learning LLMs is pure fun"] -] - -demo = gr.Interface( - fn=generate, - inputs=gr.inputs.Textbox(lines=5, label="Input Text"), - outputs=gr.outputs.Textbox(label="Generated Text"), - examples=examples -) - -demo.launch() \ No newline at end of file diff --git a/spaces/amongusrickroll68/openai-jukebox-5b-lyrics/app.py b/spaces/amongusrickroll68/openai-jukebox-5b-lyrics/app.py deleted file mode 100644 index 12f3419eb0aab404d2605dfb3c9fde1103f44d00..0000000000000000000000000000000000000000 --- a/spaces/amongusrickroll68/openai-jukebox-5b-lyrics/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/openai/jukebox-5b-lyrics").launch() \ No newline at end of file diff --git a/spaces/anaclaudia13ct/insect_detection/utils/loss.py b/spaces/anaclaudia13ct/insect_detection/utils/loss.py deleted file mode 100644 index 9b9c3d9f80181d1ad5b54d2700f32ba042368c31..0000000000000000000000000000000000000000 --- a/spaces/anaclaudia13ct/insect_detection/utils/loss.py +++ /dev/null @@ -1,234 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Loss functions -""" - -import torch -import torch.nn as nn - -from utils.metrics import bbox_iou -from utils.torch_utils import de_parallel - - -def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 - # return positive, negative label smoothing BCE targets - return 1.0 - 0.5 * eps, 0.5 * eps - - -class BCEBlurWithLogitsLoss(nn.Module): - # BCEwithLogitLoss() with reduced missing label effects. 
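- # A confidently positive prediction on a zero label (dx = pred - true near 1) is treated as a likely
- # missing annotation, so its loss is scaled down by the factor 1 - exp((dx - 1) / (alpha + 1e-4)).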
- def __init__(self, alpha=0.05): - super().__init__() - self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() - self.alpha = alpha - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - pred = torch.sigmoid(pred) # prob from logits - dx = pred - true # reduce only missing label effects - # dx = (pred - true).abs() # reduce missing label and false label effects - alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) - loss *= alpha_factor - return loss.mean() - - -class FocalLoss(nn.Module): - # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) - def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super().__init__() - self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() - self.gamma = gamma - self.alpha = alpha - self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - # p_t = torch.exp(-loss) - # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability - - # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py - pred_prob = torch.sigmoid(pred) # prob from logits - p_t = true * pred_prob + (1 - true) * (1 - pred_prob) - alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) - modulating_factor = (1.0 - p_t) ** self.gamma - loss *= alpha_factor * modulating_factor - - if self.reduction == 'mean': - return loss.mean() - elif self.reduction == 'sum': - return loss.sum() - else: # 'none' - return loss - - -class QFocalLoss(nn.Module): - # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) - def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super().__init__() - self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() - self.gamma = gamma - self.alpha = alpha - self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - - pred_prob = torch.sigmoid(pred) # prob from logits - alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) - modulating_factor = torch.abs(true - pred_prob) ** self.gamma - loss *= alpha_factor * modulating_factor - - if self.reduction == 'mean': - return loss.mean() - elif self.reduction == 'sum': - return loss.sum() - else: # 'none' - return loss - - -class ComputeLoss: - sort_obj_iou = False - - # Compute losses - def __init__(self, model, autobalance=False): - device = next(model.parameters()).device # get model device - h = model.hyp # hyperparameters - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - m = de_parallel(model).model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 - self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 
1.0, h, autobalance - self.na = m.na # number of anchors - self.nc = m.nc # number of classes - self.nl = m.nl # number of layers - self.anchors = m.anchors - self.device = device - - def __call__(self, p, targets): # predictions, targets - lcls = torch.zeros(1, device=self.device) # class loss - lbox = torch.zeros(1, device=self.device) # box loss - lobj = torch.zeros(1, device=self.device) # object loss - tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets - - # Losses - for i, pi in enumerate(p): # layer index, layer predictions - b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj - - n = b.shape[0] # number of targets - if n: - # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 - pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions - - # Regression - pxy = pxy.sigmoid() * 2 - 0.5 - pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] - pbox = torch.cat((pxy, pwh), 1) # predicted box - iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) - lbox += (1.0 - iou).mean() # iou loss - - # Objectness - iou = iou.detach().clamp(0).type(tobj.dtype) - if self.sort_obj_iou: - j = iou.argsort() - b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] - if self.gr < 1: - iou = (1.0 - self.gr) + self.gr * iou - tobj[b, a, gj, gi] = iou # iou ratio - - # Classification - if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(pcls, self.cn, device=self.device) # targets - t[range(n), tcls[i]] = self.cp - lcls += self.BCEcls(pcls, t) # BCE - - # Append targets to text file - # with open('targets.txt', 'a') as file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - - obji = self.BCEobj(pi[..., 4], tobj) - lobj += obji * self.balance[i] # obj loss - if self.autobalance: - self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() - - if self.autobalance: - self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= self.hyp['cls'] - bs = tobj.shape[0] # batch size - - return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() - - def build_targets(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - tcls, tbox, indices, anch = [], [], [], [] - gain = torch.ones(7, device=self.device) # normalized to gridspace gain - ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices - - g = 0.5 # bias - off = torch.tensor( - [ - [0, 0], - [1, 0], - [0, 1], - [-1, 0], - [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], - device=self.device).float() * g # offsets - - for i in range(self.nl): - anchors, shape = self.anchors[i], p[i].shape - gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain # shape(3,n,7) - if nt: - # Matches - r = t[..., 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] 
- gxy # inverse - j, k = ((gxy % 1 < g) & (gxy > 1)).T - l, m = ((gxi % 1 < g) & (gxi > 1)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - bc, gxy, gwh, a = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors - a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class - gij = (gxy - offsets).long() - gi, gj = gij.T # grid indices - - # Append - indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid - tbox.append(torch.cat((gxy - gij, gwh), 1)) # box - anch.append(anchors[a]) # anchors - tcls.append(c) # class - - return tcls, tbox, indices, anch diff --git a/spaces/anusurabhi/girl_race_detector/README.md b/spaces/anusurabhi/girl_race_detector/README.md deleted file mode 100644 index 74449a2c11c5041a776c076c702d38aae83b4e84..0000000000000000000000000000000000000000 --- a/spaces/anusurabhi/girl_race_detector/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Girl Race Detector -emoji: 📈 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/aodianyun/stable-diffusion-webui/javascript/extraNetworks.js b/spaces/aodianyun/stable-diffusion-webui/javascript/extraNetworks.js deleted file mode 100644 index b15758b91ebd271604ac8ad342a19537b2efc760..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/javascript/extraNetworks.js +++ /dev/null @@ -1,107 +0,0 @@ - -function setupExtraNetworksForTab(tabname){ - gradioApp().querySelector('#'+tabname+'_extra_tabs').classList.add('extra-networks') - - var tabs = gradioApp().querySelector('#'+tabname+'_extra_tabs > div') - var search = gradioApp().querySelector('#'+tabname+'_extra_search textarea') - var refresh = gradioApp().getElementById(tabname+'_extra_refresh') - var close = gradioApp().getElementById(tabname+'_extra_close') - - search.classList.add('search') - tabs.appendChild(search) - tabs.appendChild(refresh) - tabs.appendChild(close) - - search.addEventListener("input", function(evt){ - searchTerm = search.value.toLowerCase() - - gradioApp().querySelectorAll('#'+tabname+'_extra_tabs div.card').forEach(function(elem){ - text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase() - elem.style.display = text.indexOf(searchTerm) == -1 ? "none" : "" - }) - }); -} - -var activePromptTextarea = {}; - -function setupExtraNetworks(){ - setupExtraNetworksForTab('txt2img') - setupExtraNetworksForTab('img2img') - - function registerPrompt(tabname, id){ - var textarea = gradioApp().querySelector("#" + id + " > label > textarea"); - - if (! activePromptTextarea[tabname]){ - activePromptTextarea[tabname] = textarea - } - - textarea.addEventListener("focus", function(){ - activePromptTextarea[tabname] = textarea; - }); - } - - registerPrompt('txt2img', 'txt2img_prompt') - registerPrompt('txt2img', 'txt2img_neg_prompt') - registerPrompt('img2img', 'img2img_prompt') - registerPrompt('img2img', 'img2img_neg_prompt') -} - -onUiLoaded(setupExtraNetworks) - -var re_extranet = /<([^:]+:[^:]+):[\d\.]+>/; -var re_extranet_g = /\s+<([^:]+:[^:]+):[\d\.]+>/g; - -function tryToRemoveExtraNetworkFromPrompt(textarea, text){ - var m = text.match(re_extranet) - if(! 
m) return false - - var partToSearch = m[1] - var replaced = false - var newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found, index){ - m = found.match(re_extranet); - if(m[1] == partToSearch){ - replaced = true; - return "" - } - return found; - }) - - if(replaced){ - textarea.value = newTextareaText - return true; - } - - return false -} - -function cardClicked(tabname, textToAdd, allowNegativePrompt){ - var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea") - - if(! tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)){ - textarea.value = textarea.value + " " + textToAdd - } - - updateInput(textarea) -} - -function saveCardPreview(event, tabname, filename){ - var textarea = gradioApp().querySelector("#" + tabname + '_preview_filename > label > textarea') - var button = gradioApp().getElementById(tabname + '_save_preview') - - textarea.value = filename - updateInput(textarea) - - button.click() - - event.stopPropagation() - event.preventDefault() -} - -function extraNetworksSearchButton(tabs_id, event){ - searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea') - button = event.target - text = button.classList.contains("search-all") ? "" : button.textContent.trim() - - searchTextarea.value = text - updateInput(searchTextarea) -} \ No newline at end of file diff --git a/spaces/aodianyun/stable-diffusion-webui/test/basic_features/img2img_test.py b/spaces/aodianyun/stable-diffusion-webui/test/basic_features/img2img_test.py deleted file mode 100644 index 08c5c903e8382ef4b969b01da87bc69fb06ff2b4..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/test/basic_features/img2img_test.py +++ /dev/null @@ -1,66 +0,0 @@ -import unittest -import requests -from gradio.processing_utils import encode_pil_to_base64 -from PIL import Image - - -class TestImg2ImgWorking(unittest.TestCase): - def setUp(self): - self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" - self.simple_img2img = { - "init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))], - "resize_mode": 0, - "denoising_strength": 0.75, - "mask": None, - "mask_blur": 4, - "inpainting_fill": 0, - "inpaint_full_res": False, - "inpaint_full_res_padding": 0, - "inpainting_mask_invert": False, - "prompt": "example prompt", - "styles": [], - "seed": -1, - "subseed": -1, - "subseed_strength": 0, - "seed_resize_from_h": -1, - "seed_resize_from_w": -1, - "batch_size": 1, - "n_iter": 1, - "steps": 3, - "cfg_scale": 7, - "width": 64, - "height": 64, - "restore_faces": False, - "tiling": False, - "negative_prompt": "", - "eta": 0, - "s_churn": 0, - "s_tmax": 0, - "s_tmin": 0, - "s_noise": 1, - "override_settings": {}, - "sampler_index": "Euler a", - "include_init_images": False - } - - def test_img2img_simple_performed(self): - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) - - def test_inpainting_masked_performed(self): - self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png")) - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) - - def test_inpainting_with_inverted_masked_performed(self): - self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png")) - self.simple_img2img["inpainting_mask_invert"] = True - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 
200) - - def test_img2img_sd_upscale_performed(self): - self.simple_img2img["script_name"] = "sd upscale" - self.simple_img2img["script_args"] = ["", 8, "Lanczos", 2.0] - - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Tempita/_looper.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Tempita/_looper.py deleted file mode 100644 index 4010988300ffd12da5dd136f1f173d584261191e..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Tempita/_looper.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -Helper for looping over sequences, particular in templates. - -Often in a loop in a template it's handy to know what's next up, -previously up, if this is the first or last item in the sequence, etc. -These can be awkward to manage in a normal Python loop, but using the -looper you can get a better sense of the context. Use like:: - - >>> for loop, item in looper(['a', 'b', 'c']): - ... print loop.number, item - ... if not loop.last: - ... print '---' - 1 a - --- - 2 b - --- - 3 c - -""" - -import sys -from Cython.Tempita.compat3 import basestring_ - -__all__ = ['looper'] - - -class looper(object): - """ - Helper for looping (particularly in templates) - - Use this like:: - - for loop, item in looper(seq): - if loop.first: - ... - """ - - def __init__(self, seq): - self.seq = seq - - def __iter__(self): - return looper_iter(self.seq) - - def __repr__(self): - return '<%s for %r>' % ( - self.__class__.__name__, self.seq) - - -class looper_iter(object): - - def __init__(self, seq): - self.seq = list(seq) - self.pos = 0 - - def __iter__(self): - return self - - def __next__(self): - if self.pos >= len(self.seq): - raise StopIteration - result = loop_pos(self.seq, self.pos), self.seq[self.pos] - self.pos += 1 - return result - - if sys.version < "3": - next = __next__ - - -class loop_pos(object): - - def __init__(self, seq, pos): - self.seq = seq - self.pos = pos - - def __repr__(self): - return '' % ( - self.seq[self.pos], self.pos) - - def index(self): - return self.pos - index = property(index) - - def number(self): - return self.pos + 1 - number = property(number) - - def item(self): - return self.seq[self.pos] - item = property(item) - - def __next__(self): - try: - return self.seq[self.pos + 1] - except IndexError: - return None - __next__ = property(__next__) - - if sys.version < "3": - next = __next__ - - def previous(self): - if self.pos == 0: - return None - return self.seq[self.pos - 1] - previous = property(previous) - - def odd(self): - return not self.pos % 2 - odd = property(odd) - - def even(self): - return self.pos % 2 - even = property(even) - - def first(self): - return self.pos == 0 - first = property(first) - - def last(self): - return self.pos == len(self.seq) - 1 - last = property(last) - - def length(self): - return len(self.seq) - length = property(length) - - def first_group(self, getter=None): - """ - Returns true if this item is the start of a new group, - where groups mean that some attribute has changed. The getter - can be None (the item itself changes), an attribute name like - ``'.attr'``, a function, or a dict key or list index. 
- """ - if self.first: - return True - return self._compare_group(self.item, self.previous, getter) - - def last_group(self, getter=None): - """ - Returns true if this item is the end of a new group, - where groups mean that some attribute has changed. The getter - can be None (the item itself changes), an attribute name like - ``'.attr'``, a function, or a dict key or list index. - """ - if self.last: - return True - return self._compare_group(self.item, self.__next__, getter) - - def _compare_group(self, item, other, getter): - if getter is None: - return item != other - elif (isinstance(getter, basestring_) - and getter.startswith('.')): - getter = getter[1:] - if getter.endswith('()'): - getter = getter[:-2] - return getattr(item, getter)() != getattr(other, getter)() - else: - return getattr(item, getter) != getattr(other, getter) - elif hasattr(getter, '__call__'): - return getter(item) != getter(other) - else: - return item[getter] != other[getter] diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/MpoImagePlugin.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/MpoImagePlugin.py deleted file mode 100644 index 5bfd8efc1a6e761a5b5791b19cff2712a34748c6..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/MpoImagePlugin.py +++ /dev/null @@ -1,181 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# MPO file handling -# -# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the -# Camera & Imaging Products Association) -# -# The multi-picture object combines multiple JPEG images (with a modified EXIF -# data format) into a single file. While it can theoretically be used much like -# a GIF animation, it is commonly used to represent 3D photographs and is (as -# of this writing) the most commonly used format by 3D cameras. -# -# History: -# 2014-03-13 Feneric Created -# -# See the README file for information on usage and redistribution. -# - -import itertools -import os -import struct - -from . 
import Image, ImageFile, ImageSequence, JpegImagePlugin, TiffImagePlugin -from ._binary import i16be as i16 -from ._binary import o32le - -# def _accept(prefix): -# return JpegImagePlugin._accept(prefix) - - -def _save(im, fp, filename): - JpegImagePlugin._save(im, fp, filename) - - -def _save_all(im, fp, filename): - append_images = im.encoderinfo.get("append_images", []) - if not append_images: - try: - animated = im.is_animated - except AttributeError: - animated = False - if not animated: - _save(im, fp, filename) - return - - offsets = [] - for imSequence in itertools.chain([im], append_images): - for im_frame in ImageSequence.Iterator(imSequence): - if not offsets: - # APP2 marker - im.encoderinfo["extra"] = ( - b"\xFF\xE2" + struct.pack(">H", 6 + 70) + b"MPF\0" + b" " * 70 - ) - JpegImagePlugin._save(im_frame, fp, filename) - offsets.append(fp.tell()) - else: - im_frame.save(fp, "JPEG") - offsets.append(fp.tell() - offsets[-1]) - - ifd = TiffImagePlugin.ImageFileDirectory_v2() - ifd[0xB001] = len(offsets) - - mpentries = b"" - data_offset = 0 - for i, size in enumerate(offsets): - if i == 0: - mptype = 0x030000 # Baseline MP Primary Image - else: - mptype = 0x000000 # Undefined - mpentries += struct.pack(" 1 - self._fp = self.fp # FIXME: hack - self._fp.seek(self.__mpoffsets[0]) # get ready to read first frame - self.__frame = 0 - self.offset = 0 - # for now we can only handle reading and individual frame extraction - self.readonly = 1 - - def load_seek(self, pos): - self._fp.seek(pos) - - def seek(self, frame): - if not self._seek_check(frame): - return - self.fp = self._fp - self.offset = self.__mpoffsets[frame] - - self.fp.seek(self.offset + 2) # skip SOI marker - segment = self.fp.read(2) - if not segment: - raise ValueError("No data found for frame") - self._size = self._initial_size - if i16(segment) == 0xFFE1: # APP1 - n = i16(self.fp.read(2)) - 2 - self.info["exif"] = ImageFile._safe_read(self.fp, n) - self._reload_exif() - - mptype = self.mpinfo[0xB002][frame]["Attribute"]["MPType"] - if mptype.startswith("Large Thumbnail"): - exif = self.getexif().get_ifd(0x8769) - if 40962 in exif and 40963 in exif: - self._size = (exif[40962], exif[40963]) - elif "exif" in self.info: - del self.info["exif"] - self._reload_exif() - - self.tile = [("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))] - self.__frame = frame - - def tell(self): - return self.__frame - - @staticmethod - def adopt(jpeg_instance, mpheader=None): - """ - Transform the instance of JpegImageFile into - an instance of MpoImageFile. - After the call, the JpegImageFile is extended - to be an MpoImageFile. - - This is essentially useful when opening a JPEG - file that reveals itself as an MPO, to avoid - double call to _open. - """ - jpeg_instance.__class__ = MpoImageFile - jpeg_instance._after_jpeg_open(mpheader) - return jpeg_instance - - -# --------------------------------------------------------------------- -# Registry stuff - -# Note that since MPO shares a factory with JPEG, we do not need to do a -# separate registration for it here. 
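-# (When a plain JPEG reveals itself as an MPO, JpegImagePlugin's factory hands it to MpoImageFile.adopt.)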
-# Image.register_open(MpoImageFile.format, -# JpegImagePlugin.jpeg_factory, _accept) -Image.register_save(MpoImageFile.format, _save) -Image.register_save_all(MpoImageFile.format, _save_all) - -Image.register_extension(MpoImageFile.format, ".mpo") - -Image.register_mime(MpoImageFile.format, "image/mpo") diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/grouped_bar_chart_horizontal.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/grouped_bar_chart_horizontal.py deleted file mode 100644 index b4ac2ec81636a48d2f10bb3ef138e9011f03243b..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/grouped_bar_chart_horizontal.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -Horizontal Grouped Bar Chart ----------------------------- -This example shows a horizontal grouped bar chart. -""" -# category: bar charts -import altair as alt -from vega_datasets import data - -source = data.barley() - -alt.Chart(source).mark_bar().encode( - x='sum(yield):Q', - y='year:O', - color='year:N', - row='site:N' -) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/ParserRuleContext.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/ParserRuleContext.py deleted file mode 100644 index e4ec5983ab8ffbcb799f077d796499d9cdb6d7cb..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/ParserRuleContext.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -# Use of this file is governed by the BSD 3-clause license that -# can be found in the LICENSE.txt file in the project root. - -#* A rule invocation record for parsing. -# -# Contains all of the information about the current rule not stored in the -# RuleContext. It handles parse tree children list, Any ATN state -# tracing, and the default values available for rule indications: -# start, stop, rule index, current alt number, current -# ATN state. -# -# Subclasses made for each rule and grammar track the parameters, -# return values, locals, and labels specific to that rule. These -# are the objects that are returned from rules. -# -# Note text is not an actual field of a rule return value; it is computed -# from start and stop using the input stream's toString() method. I -# could add a ctor to this so that we can pass in and store the input -# stream, but I'm not sure we want to do that. It would seem to be undefined -# to get the .text property anyway if the rule matches tokens from multiple -# input streams. -# -# I do not use getters for fields of objects that are used simply to -# group values such as this aggregate. The getters/setters are there to -# satisfy the superclass interface. - -from antlr4.RuleContext import RuleContext -from antlr4.Token import Token -from antlr4.tree.Tree import ParseTreeListener, ParseTree, TerminalNodeImpl, ErrorNodeImpl, TerminalNode, \ - INVALID_INTERVAL - -# need forward declaration -ParserRuleContext = None - -class ParserRuleContext(RuleContext): - - def __init__(self, parent:ParserRuleContext = None, invokingStateNumber:int = None ): - super().__init__(parent, invokingStateNumber) - #* If we are debugging or building a parse tree for a visitor, - # we need to track all of the tokens and rule invocations associated - # with this rule's context. This is empty for parsing w/o tree constr. 
- # operation because we don't the need to track the details about - # how we parse this rule. - #/ - self.children = None - self.start = None - self.stop = None - # The exception that forced this rule to return. If the rule successfully - # completed, this is {@code null}. - self.exception = None - - #* COPY a ctx (I'm deliberately not using copy constructor)#/ - # - # This is used in the generated parser code to flip a generic XContext - # node for rule X to a YContext for alt label Y. In that sense, it is - # not really a generic copy function. - # - # If we do an error sync() at start of a rule, we might add error nodes - # to the generic XContext so this function must copy those nodes to - # the YContext as well else they are lost! - #/ - def copyFrom(self, ctx:ParserRuleContext): - # from RuleContext - self.parentCtx = ctx.parentCtx - self.invokingState = ctx.invokingState - self.children = None - self.start = ctx.start - self.stop = ctx.stop - - # copy any error nodes to alt label node - if ctx.children is not None: - self.children = [] - # reset parent pointer for any error nodes - for child in ctx.children: - if isinstance(child, ErrorNodeImpl): - self.children.append(child) - child.parentCtx = self - - # Double dispatch methods for listeners - def enterRule(self, listener:ParseTreeListener): - pass - - def exitRule(self, listener:ParseTreeListener): - pass - - #* Does not set parent link; other add methods do that#/ - def addChild(self, child:ParseTree): - if self.children is None: - self.children = [] - self.children.append(child) - return child - - #* Used by enterOuterAlt to toss out a RuleContext previously added as - # we entered a rule. If we have # label, we will need to remove - # generic ruleContext object. - #/ - def removeLastChild(self): - if self.children is not None: - del self.children[len(self.children)-1] - - def addTokenNode(self, token:Token): - node = TerminalNodeImpl(token) - self.addChild(node) - node.parentCtx = self - return node - - def addErrorNode(self, badToken:Token): - node = ErrorNodeImpl(badToken) - self.addChild(node) - node.parentCtx = self - return node - - def getChild(self, i:int, ttype:type = None): - if ttype is None: - return self.children[i] if len(self.children)>i else None - else: - for child in self.getChildren(): - if not isinstance(child, ttype): - continue - if i==0: - return child - i -= 1 - return None - - def getChildren(self, predicate = None): - if self.children is not None: - for child in self.children: - if predicate is not None and not predicate(child): - continue - yield child - - def getToken(self, ttype:int, i:int): - for child in self.getChildren(): - if not isinstance(child, TerminalNode): - continue - if child.symbol.type != ttype: - continue - if i==0: - return child - i -= 1 - return None - - def getTokens(self, ttype:int ): - if self.getChildren() is None: - return [] - tokens = [] - for child in self.getChildren(): - if not isinstance(child, TerminalNode): - continue - if child.symbol.type != ttype: - continue - tokens.append(child) - return tokens - - def getTypedRuleContext(self, ctxType:type, i:int): - return self.getChild(i, ctxType) - - def getTypedRuleContexts(self, ctxType:type): - children = self.getChildren() - if children is None: - return [] - contexts = [] - for child in children: - if not isinstance(child, ctxType): - continue - contexts.append(child) - return contexts - - def getChildCount(self): - return len(self.children) if self.children else 0 - - def getSourceInterval(self): - if self.start is 
None or self.stop is None: - return INVALID_INTERVAL - else: - return (self.start.tokenIndex, self.stop.tokenIndex) - - -RuleContext.EMPTY = ParserRuleContext() - -class InterpreterRuleContext(ParserRuleContext): - - def __init__(self, parent:ParserRuleContext, invokingStateNumber:int, ruleIndex:int): - super().__init__(parent, invokingStateNumber) - self.ruleIndex = ruleIndex diff --git a/spaces/atimughal662/InfoFusion/src/gpt_langchain.py b/spaces/atimughal662/InfoFusion/src/gpt_langchain.py deleted file mode 100644 index 144d9ec5c3783430db8c0714828028137ceac94d..0000000000000000000000000000000000000000 --- a/spaces/atimughal662/InfoFusion/src/gpt_langchain.py +++ /dev/null @@ -1,5394 +0,0 @@ -import ast -import asyncio -import copy -import functools -import glob -import gzip -import inspect -import json -import os -import pathlib -import pickle -import shutil -import subprocess -import tempfile -import time -import traceback -import types -import typing -import urllib.error -import uuid -import zipfile -from collections import defaultdict -from datetime import datetime -from functools import reduce -from operator import concat -import filelock -import tabulate -import yaml - -from joblib import delayed -from langchain.callbacks import streaming_stdout -from langchain.embeddings import HuggingFaceInstructEmbeddings -from langchain.llms.huggingface_pipeline import VALID_TASKS -from langchain.llms.utils import enforce_stop_tokens -from langchain.schema import LLMResult, Generation -from langchain.tools import PythonREPLTool -from langchain.tools.json.tool import JsonSpec -from tqdm import tqdm - -from src.db_utils import length_db1, set_dbid, set_userid, get_dbid, get_userid_direct, get_username_direct, \ - set_userid_direct -from utils import wrapped_partial, EThread, import_matplotlib, sanitize_filename, makedirs, get_url, flatten_list, \ - get_device, ProgressParallel, remove, hash_file, clear_torch_cache, NullContext, get_hf_server, FakeTokenizer, \ - have_libreoffice, have_arxiv, have_playwright, have_selenium, have_tesseract, have_doctr, have_pymupdf, set_openai, \ - get_list_or_str, have_pillow, only_selenium, only_playwright, only_unstructured_urls, get_sha, get_short_name, \ - get_accordion, have_jq, get_doc, get_source, have_chromamigdb, get_token_count, reverse_ucurve_list -from enums import DocumentSubset, no_lora_str, model_token_mapping, source_prefix, source_postfix, non_query_commands, \ - LangChainAction, LangChainMode, DocumentChoice, LangChainTypes, font_size, head_acc, super_source_prefix, \ - super_source_postfix, langchain_modes_intrinsic, get_langchain_prompts, LangChainAgent -from evaluate_params import gen_hyper, gen_hyper0 -from gen import get_model, SEED, get_limited_prompt, get_docs_tokens -from prompter import non_hf_types, PromptType, Prompter -from src.serpapi import H2OSerpAPIWrapper -from utils_langchain import StreamingGradioCallbackHandler, _chunk_sources, _add_meta, add_parser, fix_json_meta - -import_matplotlib() - -import numpy as np -import pandas as pd -import requests -from langchain.chains.qa_with_sources import load_qa_with_sources_chain -# , GCSDirectoryLoader, GCSFileLoader -# , OutlookMessageLoader # GPL3 -# ImageCaptionLoader, # use our own wrapper -# ReadTheDocsLoader, # no special file, some path, so have to give as special option -from langchain.document_loaders import PyPDFLoader, TextLoader, CSVLoader, PythonLoader, TomlLoader, \ - UnstructuredURLLoader, UnstructuredHTMLLoader, UnstructuredWordDocumentLoader, UnstructuredMarkdownLoader, \ 
- EverNoteLoader, UnstructuredEmailLoader, UnstructuredODTLoader, UnstructuredPowerPointLoader, \ - UnstructuredEPubLoader, UnstructuredImageLoader, UnstructuredRTFLoader, ArxivLoader, UnstructuredPDFLoader, \ - UnstructuredExcelLoader, JSONLoader -from langchain.text_splitter import Language -from langchain.chains.question_answering import load_qa_chain -from langchain.docstore.document import Document -from langchain import PromptTemplate, HuggingFaceTextGenInference, HuggingFacePipeline -from langchain.vectorstores import Chroma -from chromamig import ChromaMig - - -def split_list(input_list, split_size): - for i in range(0, len(input_list), split_size): - yield input_list[i:i + split_size] - - -def get_db(sources, use_openai_embedding=False, db_type='faiss', - persist_directory=None, load_db_if_exists=True, - langchain_mode='notset', - langchain_mode_paths={}, - langchain_mode_types={}, - collection_name=None, - hf_embedding_model=None, - migrate_embedding_model=False, - auto_migrate_db=False, - n_jobs=-1): - if not sources: - return None - user_path = langchain_mode_paths.get(langchain_mode) - if persist_directory is None: - langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value) - persist_directory, langchain_type = get_persist_directory(langchain_mode, langchain_type=langchain_type) - langchain_mode_types[langchain_mode] = langchain_type - assert hf_embedding_model is not None - - # get freshly-determined embedding model - embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model) - assert collection_name is not None or langchain_mode != 'notset' - if collection_name is None: - collection_name = langchain_mode.replace(' ', '_') - - # Create vector database - if db_type == 'faiss': - from langchain.vectorstores import FAISS - db = FAISS.from_documents(sources, embedding) - elif db_type == 'weaviate': - import weaviate - from weaviate.embedded import EmbeddedOptions - from langchain.vectorstores import Weaviate - - if os.getenv('WEAVIATE_URL', None): - client = _create_local_weaviate_client() - else: - client = weaviate.Client( - embedded_options=EmbeddedOptions(persistence_data_path=persist_directory) - ) - index_name = collection_name.capitalize() - db = Weaviate.from_documents(documents=sources, embedding=embedding, client=client, by_text=False, - index_name=index_name) - elif db_type in ['chroma', 'chroma_old']: - assert persist_directory is not None - # use_base already handled when making persist_directory, unless was passed into get_db() - makedirs(persist_directory, exist_ok=True) - - # see if already actually have persistent db, and deal with possible changes in embedding - db, use_openai_embedding, hf_embedding_model = \ - get_existing_db(None, persist_directory, load_db_if_exists, db_type, - use_openai_embedding, - langchain_mode, langchain_mode_paths, langchain_mode_types, - hf_embedding_model, migrate_embedding_model, auto_migrate_db, - verbose=False, - n_jobs=n_jobs) - if db is None: - import logging - logging.getLogger("chromadb").setLevel(logging.ERROR) - if db_type == 'chroma': - from chromadb.config import Settings - settings_extra_kwargs = dict(is_persistent=True) - else: - from chromamigdb.config import Settings - settings_extra_kwargs = dict(chroma_db_impl="duckdb+parquet") - client_settings = Settings(anonymized_telemetry=False, - persist_directory=persist_directory, - **settings_extra_kwargs) - if n_jobs in [None, -1]: - n_jobs = int(os.getenv('OMP_NUM_THREADS', str(os.cpu_count() // 2))) - num_threads = 
max(1, min(n_jobs, 8)) - else: - num_threads = max(1, n_jobs) - collection_metadata = {"hnsw:num_threads": num_threads} - from_kwargs = dict(embedding=embedding, - persist_directory=persist_directory, - collection_name=collection_name, - client_settings=client_settings, - collection_metadata=collection_metadata) - if db_type == 'chroma': - import chromadb - api = chromadb.PersistentClient(path=persist_directory) - max_batch_size = api._producer.max_batch_size - sources_batches = split_list(sources, max_batch_size) - for sources_batch in sources_batches: - db = Chroma.from_documents(documents=sources_batch, **from_kwargs) - db.persist() - else: - db = ChromaMig.from_documents(documents=sources, **from_kwargs) - clear_embedding(db) - save_embed(db, use_openai_embedding, hf_embedding_model) - else: - # then just add - # doesn't check or change embedding, just saves it in case not saved yet, after persisting - db, num_new_sources, new_sources_metadata = add_to_db(db, sources, db_type=db_type, - use_openai_embedding=use_openai_embedding, - hf_embedding_model=hf_embedding_model) - else: - raise RuntimeError("No such db_type=%s" % db_type) - - # once here, db is not changing and embedding choices in calling functions does not matter - return db - - -def _get_unique_sources_in_weaviate(db): - batch_size = 100 - id_source_list = [] - result = db._client.data_object.get(class_name=db._index_name, limit=batch_size) - - while result['objects']: - id_source_list += [(obj['id'], obj['properties']['source']) for obj in result['objects']] - last_id = id_source_list[-1][0] - result = db._client.data_object.get(class_name=db._index_name, limit=batch_size, after=last_id) - - unique_sources = {source for _, source in id_source_list} - return unique_sources - - -def del_from_db(db, sources, db_type=None): - if db_type in ['chroma', 'chroma_old'] and db is not None: - # sources should be list of x.metadata['source'] from document metadatas - if isinstance(sources, str): - sources = [sources] - else: - assert isinstance(sources, (list, tuple, types.GeneratorType)) - metadatas = set(sources) - client_collection = db._client.get_collection(name=db._collection.name, - embedding_function=db._collection._embedding_function) - for source in metadatas: - meta = dict(source=source) - try: - client_collection.delete(where=meta) - except KeyError: - pass - - -def add_to_db(db, sources, db_type='faiss', - avoid_dup_by_file=False, - avoid_dup_by_content=True, - use_openai_embedding=False, - hf_embedding_model=None): - assert hf_embedding_model is not None - num_new_sources = len(sources) - if not sources: - return db, num_new_sources, [] - if db_type == 'faiss': - db.add_documents(sources) - elif db_type == 'weaviate': - # FIXME: only control by file name, not hash yet - if avoid_dup_by_file or avoid_dup_by_content: - unique_sources = _get_unique_sources_in_weaviate(db) - sources = [x for x in sources if x.metadata['source'] not in unique_sources] - num_new_sources = len(sources) - if num_new_sources == 0: - return db, num_new_sources, [] - db.add_documents(documents=sources) - elif db_type in ['chroma', 'chroma_old']: - collection = get_documents(db) - # files we already have: - metadata_files = set([x['source'] for x in collection['metadatas']]) - if avoid_dup_by_file: - # Too weak in case file changed content, assume parent shouldn't pass true for this for now - raise RuntimeError("Not desired code path") - if avoid_dup_by_content: - # look at hash, instead of page_content - # migration: If no hash previously, avoid 
updating, - # since don't know if need to update and may be expensive to redo all unhashed files - metadata_hash_ids = set( - [x['hashid'] for x in collection['metadatas'] if 'hashid' in x and x['hashid'] not in ["None", None]]) - # avoid sources with same hash - sources = [x for x in sources if x.metadata.get('hashid') not in metadata_hash_ids] - num_nohash = len([x for x in sources if not x.metadata.get('hashid')]) - print("Found %s new sources (%d have no hash in original source," - " so have to reprocess for migration to sources with hash)" % (len(sources), num_nohash), flush=True) - # get new file names that match existing file names. delete existing files we are overridding - dup_metadata_files = set([x.metadata['source'] for x in sources if x.metadata['source'] in metadata_files]) - print("Removing %s duplicate files from db because ingesting those as new documents" % len( - dup_metadata_files), flush=True) - client_collection = db._client.get_collection(name=db._collection.name, - embedding_function=db._collection._embedding_function) - for dup_file in dup_metadata_files: - dup_file_meta = dict(source=dup_file) - try: - client_collection.delete(where=dup_file_meta) - except KeyError: - pass - num_new_sources = len(sources) - if num_new_sources == 0: - return db, num_new_sources, [] - if hasattr(db, '_persist_directory'): - print("Existing db, adding to %s" % db._persist_directory, flush=True) - # chroma only - lock_file = get_db_lock_file(db) - context = filelock.FileLock - else: - lock_file = None - context = NullContext - with context(lock_file): - # this is place where add to db, but others maybe accessing db, so lock access. - # else see RuntimeError: Index seems to be corrupted or unsupported - import chromadb - api = chromadb.PersistentClient(path=db._persist_directory) - max_batch_size = api._producer.max_batch_size - sources_batches = split_list(sources, max_batch_size) - for sources_batch in sources_batches: - db.add_documents(documents=sources_batch) - db.persist() - clear_embedding(db) - # save here is for migration, in case old db directory without embedding saved - save_embed(db, use_openai_embedding, hf_embedding_model) - else: - raise RuntimeError("No such db_type=%s" % db_type) - - new_sources_metadata = [x.metadata for x in sources] - - return db, num_new_sources, new_sources_metadata - - -def create_or_update_db(db_type, persist_directory, collection_name, - user_path, langchain_type, - sources, use_openai_embedding, add_if_exists, verbose, - hf_embedding_model, migrate_embedding_model, auto_migrate_db, - n_jobs=-1): - if not os.path.isdir(persist_directory) or not add_if_exists: - if os.path.isdir(persist_directory): - if verbose: - print("Removing %s" % persist_directory, flush=True) - remove(persist_directory) - if verbose: - print("Generating db", flush=True) - if db_type == 'weaviate': - import weaviate - from weaviate.embedded import EmbeddedOptions - - if os.getenv('WEAVIATE_URL', None): - client = _create_local_weaviate_client() - else: - client = weaviate.Client( - embedded_options=EmbeddedOptions(persistence_data_path=persist_directory) - ) - - index_name = collection_name.replace(' ', '_').capitalize() - if client.schema.exists(index_name) and not add_if_exists: - client.schema.delete_class(index_name) - if verbose: - print("Removing %s" % index_name, flush=True) - elif db_type in ['chroma', 'chroma_old']: - pass - - if not add_if_exists: - if verbose: - print("Generating db", flush=True) - else: - if verbose: - print("Loading and updating db", 
flush=True) - - db = get_db(sources, - use_openai_embedding=use_openai_embedding, - db_type=db_type, - persist_directory=persist_directory, - langchain_mode=collection_name, - langchain_mode_paths={collection_name: user_path}, - langchain_mode_types={collection_name: langchain_type}, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - n_jobs=n_jobs) - - return db - - -from langchain.embeddings import FakeEmbeddings - - -class H2OFakeEmbeddings(FakeEmbeddings): - """Fake embedding model, but constant instead of random""" - - size: int - """The size of the embedding vector.""" - - def _get_embedding(self) -> typing.List[float]: - return [1] * self.size - - def embed_documents(self, texts: typing.List[str]) -> typing.List[typing.List[float]]: - return [self._get_embedding() for _ in texts] - - def embed_query(self, text: str) -> typing.List[float]: - return self._get_embedding() - - -def get_embedding(use_openai_embedding, hf_embedding_model=None, preload=False): - assert hf_embedding_model is not None - # Get embedding model - if use_openai_embedding: - assert os.getenv("OPENAI_API_KEY") is not None, "Set ENV OPENAI_API_KEY" - from langchain.embeddings import OpenAIEmbeddings - embedding = OpenAIEmbeddings(disallowed_special=()) - elif hf_embedding_model == 'fake': - embedding = H2OFakeEmbeddings(size=1) - else: - if isinstance(hf_embedding_model, str): - pass - elif isinstance(hf_embedding_model, dict): - # embedding itself preloaded globally - return hf_embedding_model['model'] - else: - # object - return hf_embedding_model - # to ensure can fork without deadlock - from langchain.embeddings import HuggingFaceEmbeddings - - device, torch_dtype, context_class = get_device_dtype() - model_kwargs = dict(device=device) - if 'instructor' in hf_embedding_model: - encode_kwargs = {'normalize_embeddings': True} - embedding = HuggingFaceInstructEmbeddings(model_name=hf_embedding_model, - model_kwargs=model_kwargs, - encode_kwargs=encode_kwargs) - else: - embedding = HuggingFaceEmbeddings(model_name=hf_embedding_model, model_kwargs=model_kwargs) - embedding.client.preload = preload - return embedding - - -def get_answer_from_sources(chain, sources, question): - return chain( - { - "input_documents": sources, - "question": question, - }, - return_only_outputs=True, - )["output_text"] - - -"""Wrapper around Huggingface text generation inference API.""" -from functools import partial -from typing import Any, Dict, List, Optional, Set, Iterable - -from pydantic import Extra, Field, root_validator - -from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun -from langchain.llms.base import LLM - - -class GradioInference(LLM): - """ - Gradio generation inference API. 
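- # Wraps a remote h2oGPT Gradio server behind LangChain's LLM interface via gradio_client,
- # sending generation requests to the server's /submit_nochat_api endpoint.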
- """ - inference_server_url: str = "" - - temperature: float = 0.8 - top_p: Optional[float] = 0.95 - top_k: Optional[int] = None - num_beams: Optional[int] = 1 - max_new_tokens: int = 512 - min_new_tokens: int = 1 - early_stopping: bool = False - max_time: int = 180 - repetition_penalty: Optional[float] = None - num_return_sequences: Optional[int] = 1 - do_sample: bool = False - chat_client: bool = False - - return_full_text: bool = False - stream_output: bool = False - sanitize_bot_response: bool = False - - prompter: Any = None - context: Any = '' - iinput: Any = '' - client: Any = None - tokenizer: Any = None - - system_prompt: Any = None - visible_models: Any = None - h2ogpt_key: Any = None - - count_input_tokens: Any = 0 - count_output_tokens: Any = 0 - - min_max_new_tokens: Any = 256 - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() - def validate_environment(cls, values: Dict) -> Dict: - """Validate that python package exists in environment.""" - - try: - if values['client'] is None: - import gradio_client - values["client"] = gradio_client.Client( - values["inference_server_url"] - ) - except ImportError: - raise ImportError( - "Could not import gradio_client python package. " - "Please install it with `pip install gradio_client`." - ) - return values - - @property - def _llm_type(self) -> str: - """Return type of llm.""" - return "gradio_inference" - - def _call( - self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> str: - # NOTE: prompt here has no prompt_type (e.g. human: bot:) prompt injection, - # so server should get prompt_type or '', not plain - # This is good, so gradio server can also handle stopping.py conditions - # this is different than TGI server that uses prompter to inject prompt_type prompting - stream_output = self.stream_output - gr_client = self.client - client_langchain_mode = 'Disabled' - client_add_chat_history_to_context = True - client_add_search_to_context = False - client_chat_conversation = [] - client_langchain_action = LangChainAction.QUERY.value - client_langchain_agents = [] - top_k_docs = 1 - chunk = True - chunk_size = 512 - client_kwargs = dict(instruction=prompt if self.chat_client else '', # only for chat=True - iinput=self.iinput if self.chat_client else '', # only for chat=True - context=self.context, - # streaming output is supported, loops over and outputs each generation in streaming mode - # but leave stream_output=False for simple input/output mode - stream_output=stream_output, - prompt_type=self.prompter.prompt_type, - prompt_dict='', - - temperature=self.temperature, - top_p=self.top_p, - top_k=self.top_k, - num_beams=self.num_beams, - max_new_tokens=self.max_new_tokens, - min_new_tokens=self.min_new_tokens, - early_stopping=self.early_stopping, - max_time=self.max_time, - repetition_penalty=self.repetition_penalty, - num_return_sequences=self.num_return_sequences, - do_sample=self.do_sample, - chat=self.chat_client, - - instruction_nochat=prompt if not self.chat_client else '', - iinput_nochat=self.iinput if not self.chat_client else '', - langchain_mode=client_langchain_mode, - add_chat_history_to_context=client_add_chat_history_to_context, - langchain_action=client_langchain_action, - langchain_agents=client_langchain_agents, - top_k_docs=top_k_docs, - chunk=chunk, - chunk_size=chunk_size, - document_subset=DocumentSubset.Relevant.name, - document_choice=[DocumentChoice.ALL.value], - 
pre_prompt_query=None, - prompt_query=None, - pre_prompt_summary=None, - prompt_summary=None, - system_prompt=self.system_prompt, - image_loaders=None, # don't need to further do doc specific things - pdf_loaders=None, # don't need to further do doc specific things - url_loaders=None, # don't need to further do doc specific things - jq_schema=None, # don't need to further do doc specific things - visible_models=self.visible_models, - h2ogpt_key=self.h2ogpt_key, - add_search_to_context=client_add_search_to_context, - chat_conversation=client_chat_conversation, - text_context_list=None, - docs_ordering_type=None, - min_max_new_tokens=self.min_max_new_tokens, - ) - api_name = '/submit_nochat_api' # NOTE: like submit_nochat but stable API for string dict passing - self.count_input_tokens += self.get_num_tokens(prompt) - - if not stream_output: - res = gr_client.predict(str(dict(client_kwargs)), api_name=api_name) - res_dict = ast.literal_eval(res) - text = res_dict['response'] - ret = self.prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=self.sanitize_bot_response) - self.count_output_tokens += self.get_num_tokens(ret) - return ret - else: - text_callback = None - if run_manager: - text_callback = partial( - run_manager.on_llm_new_token, verbose=self.verbose - ) - - job = gr_client.submit(str(dict(client_kwargs)), api_name=api_name) - text0 = '' - while not job.done(): - if job.communicator.job.latest_status.code.name == 'FINISHED': - break - e = job.future._exception - if e is not None: - break - outputs_list = job.communicator.job.outputs - if outputs_list: - res = job.communicator.job.outputs[-1] - res_dict = ast.literal_eval(res) - text = res_dict['response'] - text = self.prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=self.sanitize_bot_response) - # FIXME: derive chunk from full for now - text_chunk = text[len(text0):] - if not text_chunk: - continue - # save old - text0 = text - - if text_callback: - text_callback(text_chunk) - - time.sleep(0.01) - - # ensure get last output to avoid race - res_all = job.outputs() - if len(res_all) > 0: - res = res_all[-1] - res_dict = ast.literal_eval(res) - text = res_dict['response'] - # FIXME: derive chunk from full for now - else: - # go with old if failure - text = text0 - text_chunk = text[len(text0):] - if text_callback: - text_callback(text_chunk) - ret = self.prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=self.sanitize_bot_response) - self.count_output_tokens += self.get_num_tokens(ret) - return ret - - def get_token_ids(self, text: str) -> List[int]: - return self.tokenizer.encode(text) - # avoid base method that is not aware of how to properly tokenize (uses GPT2) - # return _get_token_ids_default_method(text) - - -class H2OHuggingFaceTextGenInference(HuggingFaceTextGenInference): - max_new_tokens: int = 512 - do_sample: bool = False - top_k: Optional[int] = None - top_p: Optional[float] = 0.95 - typical_p: Optional[float] = 0.95 - temperature: float = 0.8 - repetition_penalty: Optional[float] = None - return_full_text: bool = False - stop_sequences: List[str] = Field(default_factory=list) - seed: Optional[int] = None - inference_server_url: str = "" - timeout: int = 300 - headers: dict = None - stream_output: bool = False - sanitize_bot_response: bool = False - prompter: Any = None - context: Any = '' - iinput: Any = '' - tokenizer: Any = None - async_sem: Any = None - count_input_tokens: Any = 0 - count_output_tokens: Any = 0 - - def _call( - self, - 
prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> str: - if stop is None: - stop = self.stop_sequences.copy() - else: - stop += self.stop_sequences.copy() - stop_tmp = stop.copy() - stop = [] - [stop.append(x) for x in stop_tmp if x not in stop] - - # HF inference server needs control over input tokens - assert self.tokenizer is not None - from h2oai_pipeline import H2OTextGenerationPipeline - prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer) - - # NOTE: TGI server does not add prompting, so must do here - data_point = dict(context=self.context, instruction=prompt, input=self.iinput) - prompt = self.prompter.generate_prompt(data_point) - self.count_input_tokens += self.get_num_tokens(prompt) - - gen_server_kwargs = dict(do_sample=self.do_sample, - stop_sequences=stop, - max_new_tokens=self.max_new_tokens, - top_k=self.top_k, - top_p=self.top_p, - typical_p=self.typical_p, - temperature=self.temperature, - repetition_penalty=self.repetition_penalty, - return_full_text=self.return_full_text, - seed=self.seed, - ) - gen_server_kwargs.update(kwargs) - - # lower bound because client is re-used if multi-threading - self.client.timeout = max(300, self.timeout) - - if not self.stream_output: - res = self.client.generate( - prompt, - **gen_server_kwargs, - ) - if self.return_full_text: - gen_text = res.generated_text[len(prompt):] - else: - gen_text = res.generated_text - # remove stop sequences from the end of the generated text - for stop_seq in stop: - if stop_seq in gen_text: - gen_text = gen_text[:gen_text.index(stop_seq)] - text = prompt + gen_text - text = self.prompter.get_response(text, prompt=prompt, - sanitize_bot_response=self.sanitize_bot_response) - else: - text_callback = None - if run_manager: - text_callback = partial( - run_manager.on_llm_new_token, verbose=self.verbose - ) - # parent handler of streamer expects to see prompt first else output="" and lose if prompt=None in prompter - if text_callback: - text_callback(prompt) - text = "" - # Note: Streaming ignores return_full_text=True - for response in self.client.generate_stream(prompt, **gen_server_kwargs): - text_chunk = response.token.text - text += text_chunk - text = self.prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=self.sanitize_bot_response) - # stream part - is_stop = False - for stop_seq in stop: - if stop_seq in text_chunk: - is_stop = True - break - if is_stop: - break - if not response.token.special: - if text_callback: - text_callback(text_chunk) - self.count_output_tokens += self.get_num_tokens(text) - return text - - async def _acall( - self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> str: - # print("acall", flush=True) - if stop is None: - stop = self.stop_sequences.copy() - else: - stop += self.stop_sequences.copy() - stop_tmp = stop.copy() - stop = [] - [stop.append(x) for x in stop_tmp if x not in stop] - - # HF inference server needs control over input tokens - assert self.tokenizer is not None - from h2oai_pipeline import H2OTextGenerationPipeline - prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer) - - # NOTE: TGI server does not add prompting, so must do here - data_point = dict(context=self.context, instruction=prompt, input=self.iinput) - prompt = self.prompter.generate_prompt(data_point) - - gen_text = await 
super()._acall(prompt, stop=stop, run_manager=run_manager, **kwargs) - - # remove stop sequences from the end of the generated text - for stop_seq in stop: - if stop_seq in gen_text: - gen_text = gen_text[:gen_text.index(stop_seq)] - text = prompt + gen_text - text = self.prompter.get_response(text, prompt=prompt, - sanitize_bot_response=self.sanitize_bot_response) - # print("acall done", flush=True) - return text - - async def _agenerate( - self, - prompts: List[str], - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> LLMResult: - """Run the LLM on the given prompt and input.""" - generations = [] - new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") - self.count_input_tokens += sum([self.get_num_tokens(prompt) for prompt in prompts]) - tasks = [ - asyncio.ensure_future(self._agenerate_one(prompt, stop=stop, run_manager=run_manager, - new_arg_supported=new_arg_supported, **kwargs)) - for prompt in prompts - ] - texts = await asyncio.gather(*tasks) - self.count_output_tokens += sum([self.get_num_tokens(text) for text in texts]) - [generations.append([Generation(text=text)]) for text in texts] - return LLMResult(generations=generations) - - async def _agenerate_one( - self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - new_arg_supported=None, - **kwargs: Any, - ) -> str: - async with self.async_sem: # semaphore limits num of simultaneous downloads - return await self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs) \ - if new_arg_supported else \ - await self._acall(prompt, stop=stop, **kwargs) - - def get_token_ids(self, text: str) -> List[int]: - return self.tokenizer.encode(text) - # avoid base method that is not aware of how to properly tokenize (uses GPT2) - # return _get_token_ids_default_method(text) - - -from langchain.chat_models import ChatOpenAI, AzureChatOpenAI -from langchain.llms import OpenAI, AzureOpenAI, Replicate -from langchain.llms.openai import _streaming_response_template, completion_with_retry, _update_response, \ - update_token_usage - - -class H2OOpenAI(OpenAI): - """ - New class to handle vLLM's use of OpenAI, no vllm_chat supported, so only need here - Handles prompting that OpenAI doesn't need, stopping as well - """ - stop_sequences: Any = None - sanitize_bot_response: bool = False - prompter: Any = None - context: Any = '' - iinput: Any = '' - tokenizer: Any = None - - @classmethod - def _all_required_field_names(cls) -> Set: - _all_required_field_names = super(OpenAI, cls)._all_required_field_names() - _all_required_field_names.update( - {'top_p', 'frequency_penalty', 'presence_penalty', 'stop_sequences', 'sanitize_bot_response', 'prompter', - 'tokenizer', 'logit_bias'}) - return _all_required_field_names - - def _generate( - self, - prompts: List[str], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> LLMResult: - stop_tmp = self.stop_sequences if not stop else self.stop_sequences + stop - stop = [] - [stop.append(x) for x in stop_tmp if x not in stop] - - # HF inference server needs control over input tokens - assert self.tokenizer is not None - from h2oai_pipeline import H2OTextGenerationPipeline - for prompti, prompt in enumerate(prompts): - prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer) - # NOTE: OpenAI/vLLM server does not add prompting, so must do here - 
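- # Illustrative sketch only (assumes a 'human_bot'-style prompt_type is configured):
- #   data_point = dict(context='', instruction='What is 2+2?', input='')
- #   prompter.generate_prompt(data_point)  # -> roughly "<human>: What is 2+2?\n<bot>:"
- # i.e. the same template the local HF pipelines use, so stop sequences and
- # prompter.get_response() parsing stay consistent across inference servers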
data_point = dict(context=self.context, instruction=prompt, input=self.iinput) - prompt = self.prompter.generate_prompt(data_point) - prompts[prompti] = prompt - - params = self._invocation_params - params = {**params, **kwargs} - sub_prompts = self.get_sub_prompts(params, prompts, stop) - choices = [] - token_usage: Dict[str, int] = {} - # Get the token usage from the response. - # Includes prompt, completion, and total tokens used. - _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} - text = '' - for _prompts in sub_prompts: - if self.streaming: - text_with_prompt = "" - prompt = _prompts[0] - if len(_prompts) > 1: - raise ValueError("Cannot stream results with multiple prompts.") - params["stream"] = True - response = _streaming_response_template() - first = True - for stream_resp in completion_with_retry( - self, prompt=_prompts, **params - ): - if first: - stream_resp["choices"][0]["text"] = prompt + stream_resp["choices"][0]["text"] - first = False - text_chunk = stream_resp["choices"][0]["text"] - text_with_prompt += text_chunk - text = self.prompter.get_response(text_with_prompt, prompt=prompt, - sanitize_bot_response=self.sanitize_bot_response) - if run_manager: - run_manager.on_llm_new_token( - text_chunk, - verbose=self.verbose, - logprobs=stream_resp["choices"][0]["logprobs"], - ) - _update_response(response, stream_resp) - choices.extend(response["choices"]) - else: - response = completion_with_retry(self, prompt=_prompts, **params) - choices.extend(response["choices"]) - if not self.streaming: - # Can't update token usage if streaming - update_token_usage(_keys, response, token_usage) - if self.streaming: - choices[0]['text'] = text - return self.create_llm_result(choices, prompts, token_usage) - - def get_token_ids(self, text: str) -> List[int]: - if self.tokenizer is not None: - return self.tokenizer.encode(text) - else: - # OpenAI uses tiktoken - return super().get_token_ids(text) - - -class H2OReplicate(Replicate): - stop_sequences: Any = None - sanitize_bot_response: bool = False - prompter: Any = None - context: Any = '' - iinput: Any = '' - tokenizer: Any = None - - def _call( - self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> str: - """Call to replicate endpoint.""" - stop_tmp = self.stop_sequences if not stop else self.stop_sequences + stop - stop = [] - [stop.append(x) for x in stop_tmp if x not in stop] - - # HF inference server needs control over input tokens - assert self.tokenizer is not None - from h2oai_pipeline import H2OTextGenerationPipeline - prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer) - # Note Replicate handles the prompting of the specific model - return super()._call(prompt, stop=stop, run_manager=run_manager, **kwargs) - - def get_token_ids(self, text: str) -> List[int]: - return self.tokenizer.encode(text) - # avoid base method that is not aware of how to properly tokenize (uses GPT2) - # return _get_token_ids_default_method(text) - - -class H2OChatOpenAI(ChatOpenAI): - @classmethod - def _all_required_field_names(cls) -> Set: - _all_required_field_names = super(ChatOpenAI, cls)._all_required_field_names() - _all_required_field_names.update({'top_p', 'frequency_penalty', 'presence_penalty', 'logit_bias'}) - return _all_required_field_names - - -class H2OAzureChatOpenAI(AzureChatOpenAI): - @classmethod - def _all_required_field_names(cls) -> Set: - _all_required_field_names = super(AzureChatOpenAI, 
cls)._all_required_field_names() - _all_required_field_names.update({'top_p', 'frequency_penalty', 'presence_penalty', 'logit_bias'}) - return _all_required_field_names - - -class H2OAzureOpenAI(AzureOpenAI): - @classmethod - def _all_required_field_names(cls) -> Set: - _all_required_field_names = super(AzureOpenAI, cls)._all_required_field_names() - _all_required_field_names.update({'top_p', 'frequency_penalty', 'presence_penalty', 'logit_bias'}) - return _all_required_field_names - - -class H2OHuggingFacePipeline(HuggingFacePipeline): - def _call( - self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> str: - response = self.pipeline(prompt, stop=stop) - if self.pipeline.task == "text-generation": - # Text generation return includes the starter text. - text = response[0]["generated_text"][len(prompt):] - elif self.pipeline.task == "text2text-generation": - text = response[0]["generated_text"] - elif self.pipeline.task == "summarization": - text = response[0]["summary_text"] - else: - raise ValueError( - f"Got invalid task {self.pipeline.task}, " - f"currently only {VALID_TASKS} are supported" - ) - if stop: - # This is a bit hacky, but I can't figure out a better way to enforce - # stop tokens when making calls to huggingface_hub. - text = enforce_stop_tokens(text, stop) - return text - - -def get_llm(use_openai_model=False, - model_name=None, - model=None, - tokenizer=None, - inference_server=None, - langchain_only_model=None, - stream_output=False, - async_output=True, - num_async=3, - do_sample=False, - temperature=0.1, - top_k=40, - top_p=0.7, - num_beams=1, - max_new_tokens=512, - min_new_tokens=1, - early_stopping=False, - max_time=180, - repetition_penalty=1.0, - num_return_sequences=1, - prompt_type=None, - prompt_dict=None, - prompter=None, - context=None, - iinput=None, - sanitize_bot_response=False, - system_prompt='', - visible_models=0, - h2ogpt_key=None, - min_max_new_tokens=None, - n_jobs=None, - cli=False, - llamacpp_dict=None, - verbose=False, - ): - # currently all but h2oai_pipeline case return prompt + new text, but could change - only_new_text = False - - if n_jobs in [None, -1]: - n_jobs = int(os.getenv('OMP_NUM_THREADS', str(os.cpu_count() // 2))) - if inference_server is None: - inference_server = '' - if inference_server.startswith('replicate'): - model_string = ':'.join(inference_server.split(':')[1:]) - if 'meta/llama' in model_string: - temperature = max(0.01, temperature if do_sample else 0) - else: - temperature =temperature if do_sample else 0 - gen_kwargs = dict(temperature=temperature, - seed=1234, - max_length=max_new_tokens, # langchain - max_new_tokens=max_new_tokens, # replicate docs - top_p=top_p if do_sample else 1, - top_k=top_k, # not always supported - repetition_penalty=repetition_penalty) - if system_prompt in [None, 'None', 'auto']: - if prompter.system_prompt: - system_prompt = prompter.system_prompt - else: - system_prompt = '' - if system_prompt: - gen_kwargs.update(dict(system_prompt=system_prompt)) - - # replicate handles prompting, so avoid get_response() filter - prompter.prompt_type = 'plain' - if stream_output: - callbacks = [StreamingGradioCallbackHandler()] - streamer = callbacks[0] if stream_output else None - llm = H2OReplicate( - streaming=True, - callbacks=callbacks, - model=model_string, - input=gen_kwargs, - stop=prompter.stop_sequences, - stop_sequences=prompter.stop_sequences, - sanitize_bot_response=sanitize_bot_response, - 
prompter=prompter, - context=context, - iinput=iinput, - tokenizer=tokenizer, - ) - else: - streamer = None - llm = H2OReplicate( - model=model_string, - input=gen_kwargs, - stop=prompter.stop_sequences, - stop_sequences=prompter.stop_sequences, - sanitize_bot_response=sanitize_bot_response, - prompter=prompter, - context=context, - iinput=iinput, - tokenizer=tokenizer, - ) - elif use_openai_model or inference_server.startswith('openai') or inference_server.startswith('vllm'): - if use_openai_model and model_name is None: - model_name = "gpt-3.5-turbo" - # FIXME: Will later import be ignored? I think so, so should be fine - openai, inf_type, deployment_name, base_url, api_version = set_openai(inference_server) - kwargs_extra = {} - if inf_type == 'openai_chat' or inf_type == 'vllm_chat': - cls = H2OChatOpenAI - # FIXME: Support context, iinput - # if inf_type == 'vllm_chat': - # kwargs_extra.update(dict(tokenizer=tokenizer)) - openai_api_key = openai.api_key - elif inf_type == 'openai_azure_chat': - cls = H2OAzureChatOpenAI - kwargs_extra.update(dict(openai_api_type='azure')) - # FIXME: Support context, iinput - if os.getenv('OPENAI_AZURE_KEY') is not None: - openai_api_key = os.getenv('OPENAI_AZURE_KEY') - else: - openai_api_key = openai.api_key - elif inf_type == 'openai_azure': - cls = H2OAzureOpenAI - kwargs_extra.update(dict(openai_api_type='azure')) - # FIXME: Support context, iinput - if os.getenv('OPENAI_AZURE_KEY') is not None: - openai_api_key = os.getenv('OPENAI_AZURE_KEY') - else: - openai_api_key = openai.api_key - else: - cls = H2OOpenAI - if inf_type == 'vllm': - kwargs_extra.update(dict(stop_sequences=prompter.stop_sequences, - sanitize_bot_response=sanitize_bot_response, - prompter=prompter, - context=context, - iinput=iinput, - tokenizer=tokenizer, - openai_api_base=openai.api_base, - client=None)) - else: - assert inf_type == 'openai' or use_openai_model - openai_api_key = openai.api_key - - if deployment_name: - kwargs_extra.update(dict(deployment_name=deployment_name)) - if api_version: - kwargs_extra.update(dict(openai_api_version=api_version)) - elif openai.api_version: - kwargs_extra.update(dict(openai_api_version=openai.api_version)) - elif inf_type in ['openai_azure', 'openai_azure_chat']: - kwargs_extra.update(dict(openai_api_version="2023-05-15")) - if base_url: - kwargs_extra.update(dict(openai_api_base=base_url)) - else: - kwargs_extra.update(dict(openai_api_base=openai.api_base)) - - callbacks = [StreamingGradioCallbackHandler()] - llm = cls(model_name=model_name, - temperature=temperature if do_sample else 0, - # FIXME: Need to count tokens and reduce max_new_tokens to fit like in generate.py - max_tokens=max_new_tokens, - top_p=top_p if do_sample else 1, - frequency_penalty=0, - presence_penalty=1.07 - repetition_penalty + 0.6, # so good default - callbacks=callbacks if stream_output else None, - openai_api_key=openai_api_key, - logit_bias=None if inf_type == 'vllm' else {}, - max_retries=6, - streaming=stream_output, - **kwargs_extra - ) - streamer = callbacks[0] if stream_output else None - if inf_type in ['openai', 'openai_chat', 'openai_azure', 'openai_azure_chat']: - prompt_type = inference_server - else: - # vllm goes here - prompt_type = prompt_type or 'plain' - elif inference_server and inference_server.startswith('sagemaker'): - callbacks = [StreamingGradioCallbackHandler()] # FIXME - streamer = None - - endpoint_name = ':'.join(inference_server.split(':')[1:2]) - region_name = ':'.join(inference_server.split(':')[2:]) - - from sagemaker import 
H2OSagemakerEndpoint, ChatContentHandler, BaseContentHandler - if inference_server.startswith('sagemaker_chat'): - content_handler = ChatContentHandler() - else: - content_handler = BaseContentHandler() - model_kwargs = dict(temperature=temperature if do_sample else 1E-10, - return_full_text=False, top_p=top_p, max_new_tokens=max_new_tokens) - llm = H2OSagemakerEndpoint( - endpoint_name=endpoint_name, - region_name=region_name, - aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'), - aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'), - model_kwargs=model_kwargs, - content_handler=content_handler, - endpoint_kwargs={'CustomAttributes': 'accept_eula=true'}, - ) - elif inference_server: - assert inference_server.startswith( - 'http'), "Malformed inference_server=%s. Did you add http:// in front?" % inference_server - - from gradio_utils.grclient import GradioClient - from text_generation import Client as HFClient - if isinstance(model, GradioClient): - gr_client = model - hf_client = None - else: - gr_client = None - hf_client = model - assert isinstance(hf_client, HFClient) - - inference_server, headers = get_hf_server(inference_server) - - # quick sanity check to avoid long timeouts, just see if can reach server - requests.get(inference_server, timeout=int(os.getenv('REQUEST_TIMEOUT_FAST', '10'))) - callbacks = [StreamingGradioCallbackHandler()] - - if gr_client: - async_output = False # FIXME: not implemented yet - chat_client = False - llm = GradioInference( - inference_server_url=inference_server, - return_full_text=False, - - temperature=temperature, - top_p=top_p, - top_k=top_k, - num_beams=num_beams, - max_new_tokens=max_new_tokens, - min_new_tokens=min_new_tokens, - early_stopping=early_stopping, - max_time=max_time, - repetition_penalty=repetition_penalty, - num_return_sequences=num_return_sequences, - do_sample=do_sample, - chat_client=chat_client, - - callbacks=callbacks if stream_output else None, - stream_output=stream_output, - prompter=prompter, - context=context, - iinput=iinput, - client=gr_client, - sanitize_bot_response=sanitize_bot_response, - tokenizer=tokenizer, - system_prompt=system_prompt, - visible_models=visible_models, - h2ogpt_key=h2ogpt_key, - min_max_new_tokens=min_max_new_tokens, - ) - elif hf_client: - # no need to pass original client, no state and fast, so can use same validate_environment from base class - async_sem = asyncio.Semaphore(num_async) if async_output else NullContext() - llm = H2OHuggingFaceTextGenInference( - inference_server_url=inference_server, - do_sample=do_sample, - max_new_tokens=max_new_tokens, - repetition_penalty=repetition_penalty, - return_full_text=False, # this only controls internal behavior, still returns processed text - seed=SEED, - - stop_sequences=prompter.stop_sequences, - temperature=temperature, - top_k=top_k, - top_p=top_p, - # typical_p=top_p, - callbacks=callbacks if stream_output else None, - stream_output=stream_output, - prompter=prompter, - context=context, - iinput=iinput, - tokenizer=tokenizer, - timeout=max_time, - sanitize_bot_response=sanitize_bot_response, - async_sem=async_sem, - ) - else: - raise RuntimeError("No defined client") - streamer = callbacks[0] if stream_output else None - elif model_name in non_hf_types: - async_output = False # FIXME: not implemented yet - assert langchain_only_model - if model_name == 'llama': - callbacks = [StreamingGradioCallbackHandler()] - streamer = callbacks[0] if stream_output else None - else: - # stream_output = False - # doesn't stream properly as 
generator, but at least - callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()] - streamer = None - if prompter: - prompt_type = prompter.prompt_type - else: - prompter = Prompter(prompt_type, prompt_dict, debug=False, chat=False, stream_output=stream_output) - pass # assume inputted prompt_type is correct - from gpt4all_llm import get_llm_gpt4all - max_max_tokens = tokenizer.model_max_length - llm = get_llm_gpt4all(model_name, - model=model, - max_new_tokens=max_new_tokens, - temperature=temperature, - repetition_penalty=repetition_penalty, - top_k=top_k, - top_p=top_p, - callbacks=callbacks, - n_jobs=n_jobs, - verbose=verbose, - streaming=stream_output, - prompter=prompter, - context=context, - iinput=iinput, - max_seq_len=max_max_tokens, - llamacpp_dict=llamacpp_dict, - ) - elif hasattr(model, 'is_exlama') and model.is_exlama(): - async_output = False # FIXME: not implemented yet - assert langchain_only_model - callbacks = [StreamingGradioCallbackHandler()] - streamer = callbacks[0] if stream_output else None - max_max_tokens = tokenizer.model_max_length - - from src.llm_exllama import Exllama - llm = Exllama(streaming=stream_output, - model_path=None, - model=model, - lora_path=None, - temperature=temperature, - top_k=top_k, - top_p=top_p, - typical=.7, - beams=1, - # beam_length = 40, - stop_sequences=prompter.stop_sequences, - callbacks=callbacks, - verbose=verbose, - max_seq_len=max_max_tokens, - fused_attn=False, - # alpha_value = 1.0, #For use with any models - # compress_pos_emb = 4.0, #For use with superhot - # set_auto_map = "3, 2" #Gpu split, this will split 3gigs/2gigs - prompter=prompter, - context=context, - iinput=iinput, - ) - else: - async_output = False # FIXME: not implemented yet - if model is None: - # only used if didn't pass model in - assert tokenizer is None - prompt_type = 'human_bot' - if model_name is None: - model_name = 'h2oai/h2ogpt-oasst1-512-12b' - # model_name = 'h2oai/h2ogpt-oig-oasst1-512-6_9b' - # model_name = 'h2oai/h2ogpt-oasst1-512-20b' - inference_server = '' - model, tokenizer, device = get_model(load_8bit=True, base_model=model_name, - inference_server=inference_server, gpu_id=0) - - max_max_tokens = tokenizer.model_max_length - only_new_text = True - gen_kwargs = dict(do_sample=do_sample, - num_beams=num_beams, - max_new_tokens=max_new_tokens, - min_new_tokens=min_new_tokens, - early_stopping=early_stopping, - max_time=max_time, - repetition_penalty=repetition_penalty, - num_return_sequences=num_return_sequences, - return_full_text=not only_new_text, - handle_long_generation=None) - if do_sample: - gen_kwargs.update(dict(temperature=temperature, - top_k=top_k, - top_p=top_p)) - assert len(set(gen_hyper).difference(gen_kwargs.keys())) == 0 - else: - assert len(set(gen_hyper0).difference(gen_kwargs.keys())) == 0 - - if stream_output: - skip_prompt = only_new_text - from gen import H2OTextIteratorStreamer - decoder_kwargs = {} - streamer = H2OTextIteratorStreamer(tokenizer, skip_prompt=skip_prompt, block=False, **decoder_kwargs) - gen_kwargs.update(dict(streamer=streamer)) - else: - streamer = None - - from h2oai_pipeline import H2OTextGenerationPipeline - pipe = H2OTextGenerationPipeline(model=model, use_prompter=True, - prompter=prompter, - context=context, - iinput=iinput, - prompt_type=prompt_type, - prompt_dict=prompt_dict, - sanitize_bot_response=sanitize_bot_response, - chat=False, stream_output=stream_output, - tokenizer=tokenizer, - # leave some room for 1 paragraph, even if min_new_tokens=0 - max_input_tokens=max_max_tokens - 
max(min_new_tokens, 256), - base_model=model_name, - **gen_kwargs) - # pipe.task = "text-generation" - # below makes it listen only to our prompt removal, - # not built in prompt removal that is less general and not specific for our model - pipe.task = "text2text-generation" - - llm = H2OHuggingFacePipeline(pipeline=pipe) - return llm, model_name, streamer, prompt_type, async_output, only_new_text - - -def get_device_dtype(): - # torch.device("cuda") leads to cuda:x cuda:y mismatches for multi-GPU consistently - import torch - n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0 - device = 'cpu' if n_gpus == 0 else 'cuda' - # from utils import NullContext - # context_class = NullContext if n_gpus > 1 or n_gpus == 0 else context_class - context_class = torch.device - torch_dtype = torch.float16 if device == 'cuda' else torch.float32 - return device, torch_dtype, context_class - - -def get_wiki_data(title, first_paragraph_only, text_limit=None, take_head=True): - """ - Get wikipedia data from online - :param title: - :param first_paragraph_only: - :param text_limit: - :param take_head: - :return: - """ - filename = 'wiki_%s_%s_%s_%s.data' % (first_paragraph_only, title, text_limit, take_head) - url = f"https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&explaintext=1&titles={title}" - if first_paragraph_only: - url += "&exintro=1" - import json - if not os.path.isfile(filename): - data = requests.get(url).json() - json.dump(data, open(filename, 'wt')) - else: - data = json.load(open(filename, "rt")) - page_content = list(data["query"]["pages"].values())[0]["extract"] - if take_head is not None and text_limit is not None: - page_content = page_content[:text_limit] if take_head else page_content[-text_limit:] - title_url = str(title).replace(' ', '_') - return Document( - page_content=str(page_content), - metadata={"source": f"https://en.wikipedia.org/wiki/{title_url}"}, - ) - - -def get_wiki_sources(first_para=True, text_limit=None): - """ - Get specific named sources from wikipedia - :param first_para: - :param text_limit: - :return: - """ - default_wiki_sources = ['Unix', 'Microsoft_Windows', 'Linux'] - wiki_sources = list(os.getenv('WIKI_SOURCES', default_wiki_sources)) - return [get_wiki_data(x, first_para, text_limit=text_limit) for x in wiki_sources] - - -def get_github_docs(repo_owner, repo_name): - """ - Access github from specific repo - :param repo_owner: - :param repo_name: - :return: - """ - with tempfile.TemporaryDirectory() as d: - subprocess.check_call( - f"git clone --depth 1 https://github.com/{repo_owner}/{repo_name}.git .", - cwd=d, - shell=True, - ) - git_sha = ( - subprocess.check_output("git rev-parse HEAD", shell=True, cwd=d) - .decode("utf-8") - .strip() - ) - repo_path = pathlib.Path(d) - markdown_files = list(repo_path.glob("*/*.md")) + list( - repo_path.glob("*/*.mdx") - ) - for markdown_file in markdown_files: - with open(markdown_file, "r") as f: - relative_path = markdown_file.relative_to(repo_path) - github_url = f"https://github.com/{repo_owner}/{repo_name}/blob/{git_sha}/{relative_path}" - yield Document(page_content=str(f.read()), metadata={"source": github_url}) - - -def get_dai_pickle(dest="."): - from huggingface_hub import hf_hub_download - # True for case when locally already logged in with correct token, so don't have to set key - token = os.getenv('HUGGING_FACE_HUB_TOKEN', True) - path_to_zip_file = hf_hub_download('h2oai/dai_docs', 'dai_docs.pickle', token=token, repo_type='dataset') -
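- # hf_hub_download returns the path of the cached file inside the local Hugging Face hub cache
- # (typically under ~/.cache/huggingface/hub); copying it into dest lets later code open
- # 'dai_docs.pickle' by its plain relative name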
shutil.copy(path_to_zip_file, dest) - - -def get_dai_docs(from_hf=False, get_pickle=True): - """ - Consume DAI documentation, or consume from public pickle - :param from_hf: get DAI docs from HF, then generate pickle for later use by LangChain - :param get_pickle: Avoid raw DAI docs, just get pickle directly from HF - :return: - """ - import pickle - - if get_pickle: - get_dai_pickle() - - dai_store = 'dai_docs.pickle' - dst = "working_dir_docs" - if not os.path.isfile(dai_store): - from create_data import setup_dai_docs - dst = setup_dai_docs(dst=dst, from_hf=from_hf) - - import glob - files = list(glob.glob(os.path.join(dst, '*rst'), recursive=True)) - - basedir = os.path.abspath(os.getcwd()) - from create_data import rst_to_outputs - new_outputs = rst_to_outputs(files) - os.chdir(basedir) - - pickle.dump(new_outputs, open(dai_store, 'wb')) - else: - new_outputs = pickle.load(open(dai_store, 'rb')) - - sources = [] - for line, file in new_outputs: - # gradio requires any linked file to be with app.py - sym_src = os.path.abspath(os.path.join(dst, file)) - sym_dst = os.path.abspath(os.path.join(os.getcwd(), file)) - if os.path.lexists(sym_dst): - os.remove(sym_dst) - os.symlink(sym_src, sym_dst) - itm = Document(page_content=str(line), metadata={"source": file}) - # NOTE: yield has issues when going into db, loses metadata - # yield itm - sources.append(itm) - return sources - - -def get_supported_types(): - non_image_types0 = ["pdf", "txt", "csv", "toml", "py", "rst", "xml", "rtf", - "md", - "html", "mhtml", "htm", - "enex", "eml", "epub", "odt", "pptx", "ppt", - "zip", - "gz", - "gzip", - "urls", - ] - # "msg", GPL3 - - video_types0 = ['WEBM', - 'MPG', 'MP2', 'MPEG', 'MPE', '.PV', - 'OGG', - 'MP4', 'M4P', 'M4V', - 'AVI', 'WMV', - 'MOV', 'QT', - 'FLV', 'SWF', - 'AVCHD'] - video_types0 = [x.lower() for x in video_types0] - if have_pillow: - from PIL import Image - exts = Image.registered_extensions() - image_types0 = {ex for ex, f in exts.items() if f in Image.OPEN if ex not in video_types0 + non_image_types0} - image_types0 = sorted(image_types0) - image_types0 = [x[1:] if x.startswith('.') else x for x in image_types0] - else: - image_types0 = [] - return non_image_types0, image_types0, video_types0 - - -non_image_types, image_types, video_types = get_supported_types() -set_image_types = set(image_types) - -if have_libreoffice or True: - # or True so it tries to load, e.g. on MAC/Windows, even if don't have libreoffice since works without that - non_image_types.extend(["docx", "doc", "xls", "xlsx"]) -if have_jq: - non_image_types.extend(["json", "jsonl"]) - -file_types = non_image_types + image_types - - -def try_as_html(file): - # try treating as html as occurs when scraping websites - from bs4 import BeautifulSoup - with open(file, "rt") as f: - try: - is_html = bool(BeautifulSoup(f.read(), "html.parser").find()) - except: # FIXME - is_html = False - if is_html: - file_url = 'file://' + file - doc1 = UnstructuredURLLoader(urls=[file_url]).load() - doc1 = [x for x in doc1 if x.page_content] - else: - doc1 = [] - return doc1 - - -def json_metadata_func(record: dict, metadata: dict) -> dict: - # Define the metadata extraction function. 
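- # JSONLoader calls this once per record selected by jq_schema, passing the raw record and the
- # metadata LangChain has built so far; keys added here (sender_name / timestamp_ms, as found in
- # Messenger-style chat exports) end up on each resulting Document, while the loader's own
- # 'source'/'seq_num' values are preserved under *_json so they are not lost downstream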
- - if isinstance(record, dict): - metadata["sender_name"] = record.get("sender_name") - metadata["timestamp_ms"] = record.get("timestamp_ms") - - if "source" in metadata: - metadata["source_json"] = metadata['source'] - if "seq_num" in metadata: - metadata["seq_num_json"] = metadata['seq_num'] - - return metadata - - -def file_to_doc(file, - filei=0, - base_path=None, verbose=False, fail_any_exception=False, - chunk=True, chunk_size=512, n_jobs=-1, - is_url=False, is_txt=False, - - # urls - use_unstructured=True, - use_playwright=False, - use_selenium=False, - - # pdfs - use_pymupdf='auto', - use_unstructured_pdf='auto', - use_pypdf='auto', - enable_pdf_ocr='auto', - try_pdf_as_html='auto', - enable_pdf_doctr='auto', - - # images - enable_ocr=False, - enable_doctr=False, - enable_pix2struct=False, - enable_captions=True, - captions_model=None, - model_loaders=None, - - # json - jq_schema='.[]', - - headsize=50, # see also H2OSerpAPIWrapper - db_type=None, - selected_file_types=None): - assert isinstance(model_loaders, dict) - if selected_file_types is not None: - set_image_types1 = set_image_types.intersection(set(selected_file_types)) - else: - set_image_types1 = set_image_types - - assert db_type is not None - chunk_sources = functools.partial(_chunk_sources, chunk=chunk, chunk_size=chunk_size, db_type=db_type) - add_meta = functools.partial(_add_meta, headsize=headsize, filei=filei) - # FIXME: if zip, file index order will not be correct if other files involved - path_to_docs_func = functools.partial(path_to_docs, - verbose=verbose, - fail_any_exception=fail_any_exception, - n_jobs=n_jobs, - chunk=chunk, chunk_size=chunk_size, - # url=file if is_url else None, - # text=file if is_txt else None, - - # urls - use_unstructured=use_unstructured, - use_playwright=use_playwright, - use_selenium=use_selenium, - - # pdfs - use_pymupdf=use_pymupdf, - use_unstructured_pdf=use_unstructured_pdf, - use_pypdf=use_pypdf, - enable_pdf_ocr=enable_pdf_ocr, - enable_pdf_doctr=enable_pdf_doctr, - try_pdf_as_html=try_pdf_as_html, - - # images - enable_ocr=enable_ocr, - enable_doctr=enable_doctr, - enable_pix2struct=enable_pix2struct, - enable_captions=enable_captions, - captions_model=captions_model, - - caption_loader=model_loaders['caption'], - doctr_loader=model_loaders['doctr'], - pix2struct_loader=model_loaders['pix2struct'], - - # json - jq_schema=jq_schema, - - db_type=db_type, - ) - - if file is None: - if fail_any_exception: - raise RuntimeError("Unexpected None file") - else: - return [] - doc1 = [] # in case no support, or disabled support - if base_path is None and not is_txt and not is_url: - # then assume want to persist but don't care which path used - # can't be in base_path - dir_name = os.path.dirname(file) - base_name = os.path.basename(file) - # if from gradio, will have its own temp uuid too, but that's ok - base_name = sanitize_filename(base_name) + "_" + str(uuid.uuid4())[:10] - base_path = os.path.join(dir_name, base_name) - if is_url: - file = file.strip() # in case accidental spaces in front or at end - file_lower = file.lower() - case1 = file_lower.startswith('arxiv:') and len(file_lower.split('arxiv:')) == 2 - case2 = file_lower.startswith('https://arxiv.org/abs') and len(file_lower.split('https://arxiv.org/abs')) == 2 - case3 = file_lower.startswith('http://arxiv.org/abs') and len(file_lower.split('http://arxiv.org/abs')) == 2 - case4 = file_lower.startswith('arxiv.org/abs/') and len(file_lower.split('arxiv.org/abs/')) == 2 - if case1 or case2 or case3 or case4: - if case1: - 
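- # case1..case4 correspond to 'arxiv:<id>', 'https://arxiv.org/abs/<id>', 'http://arxiv.org/abs/<id>'
- # and 'arxiv.org/abs/<id>' inputs respectively; each branch strips its own prefix to recover the bare arxiv id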
query = file.lower().split('arxiv:')[1].strip() - elif case2: - query = file.lower().split('https://arxiv.org/abs/')[1].strip() - elif case3: - query = file.lower().split('http://arxiv.org/abs/')[1].strip() - elif case4: - query = file.lower().split('arxiv.org/abs/')[1].strip() - else: - raise RuntimeError("Unexpected arxiv error for %s" % file) - if have_arxiv: - trials = 3 - docs1 = [] - for trial in range(trials): - try: - docs1 = ArxivLoader(query=query, load_max_docs=20, load_all_available_meta=True).load() - break - except urllib.error.URLError: - pass - if not docs1: - print("Failed to get arxiv %s" % query, flush=True) - # ensure string, sometimes None - [[x.metadata.update({k: str(v)}) for k, v in x.metadata.items()] for x in docs1] - query_url = f"https://arxiv.org/abs/{query}" - [x.metadata.update( - dict(source=x.metadata.get('entry_id', query_url), query=query_url, - input_type='arxiv', head=x.metadata.get('Title', ''), date=str(datetime.now()))) for x in - docs1] - else: - docs1 = [] - else: - if not (file.startswith("http://") or file.startswith("file://") or file.startswith("https://")): - file = 'http://' + file - docs1 = [] - do_unstructured = only_unstructured_urls or use_unstructured - if only_selenium or only_playwright: - do_unstructured = False - do_playwright = have_playwright and (use_playwright or only_playwright) - if only_unstructured_urls or only_selenium: - do_playwright = False - do_selenium = have_selenium and (use_selenium or only_selenium) - if only_unstructured_urls or only_playwright: - do_selenium = False - if do_unstructured or use_unstructured: - docs1a = UnstructuredURLLoader(urls=[file]).load() - docs1a = [x for x in docs1a if x.page_content] - add_parser(docs1a, 'UnstructuredURLLoader') - docs1.extend(docs1a) - if len(docs1) == 0 and have_playwright or do_playwright: - # then something went wrong, try another loader: - from langchain.document_loaders import PlaywrightURLLoader - docs1a = asyncio.run(PlaywrightURLLoader(urls=[file]).aload()) - # docs1 = PlaywrightURLLoader(urls=[file]).load() - docs1a = [x for x in docs1a if x.page_content] - add_parser(docs1a, 'PlaywrightURLLoader') - docs1.extend(docs1a) - if len(docs1) == 0 and have_selenium or do_selenium: - # then something went wrong, try another loader: - # but requires Chrome binary, else get: selenium.common.exceptions.WebDriverException: - # Message: unknown error: cannot find Chrome binary - from langchain.document_loaders import SeleniumURLLoader - from selenium.common.exceptions import WebDriverException - try: - docs1a = SeleniumURLLoader(urls=[file]).load() - docs1a = [x for x in docs1a if x.page_content] - add_parser(docs1a, 'SeleniumURLLoader') - docs1.extend(docs1a) - except WebDriverException as e: - print("No web driver: %s" % str(e), flush=True) - [x.metadata.update(dict(input_type='url', date=str(datetime.now()))) for x in docs1] - add_meta(docs1, file, parser="is_url") - docs1 = clean_doc(docs1) - doc1 = chunk_sources(docs1) - elif is_txt: - base_path = "user_paste" - base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True) - source_file = os.path.join(base_path, "_%s" % str(uuid.uuid4())[:10]) - with open(source_file, "wt") as f: - f.write(file) - metadata = dict(source=source_file, date=str(datetime.now()), input_type='pasted txt') - doc1 = Document(page_content=str(file), metadata=metadata) - add_meta(doc1, file, parser="f.write") - # Bit odd to change if was original text - # doc1 = clean_doc(doc1) - elif file.lower().endswith('.html') or
file.lower().endswith('.mhtml') or file.lower().endswith('.htm'): - docs1 = UnstructuredHTMLLoader(file_path=file).load() - add_meta(docs1, file, parser='UnstructuredHTMLLoader') - docs1 = clean_doc(docs1) - doc1 = chunk_sources(docs1, language=Language.HTML) - elif (file.lower().endswith('.docx') or file.lower().endswith('.doc')) and (have_libreoffice or True): - docs1 = UnstructuredWordDocumentLoader(file_path=file).load() - add_meta(docs1, file, parser='UnstructuredWordDocumentLoader') - doc1 = chunk_sources(docs1) - elif (file.lower().endswith('.xlsx') or file.lower().endswith('.xls')) and (have_libreoffice or True): - docs1 = UnstructuredExcelLoader(file_path=file).load() - add_meta(docs1, file, parser='UnstructuredExcelLoader') - doc1 = chunk_sources(docs1) - elif file.lower().endswith('.odt'): - docs1 = UnstructuredODTLoader(file_path=file).load() - add_meta(docs1, file, parser='UnstructuredODTLoader') - doc1 = chunk_sources(docs1) - elif file.lower().endswith('pptx') or file.lower().endswith('ppt'): - docs1 = UnstructuredPowerPointLoader(file_path=file).load() - add_meta(docs1, file, parser='UnstructuredPowerPointLoader') - docs1 = clean_doc(docs1) - doc1 = chunk_sources(docs1) - elif file.lower().endswith('.txt'): - # use UnstructuredFileLoader ? - docs1 = TextLoader(file, encoding="utf8", autodetect_encoding=True).load() - # makes just one, but big one - doc1 = chunk_sources(docs1) - # Bit odd to change if was original text - # doc1 = clean_doc(doc1) - add_meta(doc1, file, parser='TextLoader') - elif file.lower().endswith('.rtf'): - docs1 = UnstructuredRTFLoader(file).load() - add_meta(docs1, file, parser='UnstructuredRTFLoader') - doc1 = chunk_sources(docs1) - elif file.lower().endswith('.md'): - docs1 = UnstructuredMarkdownLoader(file).load() - add_meta(docs1, file, parser='UnstructuredMarkdownLoader') - docs1 = clean_doc(docs1) - doc1 = chunk_sources(docs1, language=Language.MARKDOWN) - elif file.lower().endswith('.enex'): - docs1 = EverNoteLoader(file).load() - add_meta(doc1, file, parser='EverNoteLoader') - doc1 = chunk_sources(docs1) - elif file.lower().endswith('.epub'): - docs1 = UnstructuredEPubLoader(file).load() - add_meta(docs1, file, parser='UnstructuredEPubLoader') - doc1 = chunk_sources(docs1) - elif any(file.lower().endswith(x) for x in set_image_types1): - docs1 = [] - if verbose: - print("BEGIN: Tesseract", flush=True) - if have_tesseract and enable_ocr: - # OCR, somewhat works, but not great - docs1a = UnstructuredImageLoader(file, strategy='ocr_only').load() - # docs1a = UnstructuredImageLoader(file, strategy='hi_res').load() - docs1a = [x for x in docs1a if x.page_content] - add_meta(docs1a, file, parser='UnstructuredImageLoader') - docs1.extend(docs1a) - if verbose: - print("END: Tesseract", flush=True) - if have_doctr and enable_doctr: - if verbose: - print("BEGIN: DocTR", flush=True) - if model_loaders['doctr'] is not None and not isinstance(model_loaders['doctr'], (str, bool)): - if verbose: - print("Reuse DocTR", flush=True) - model_loaders['doctr'].load_model() - else: - if verbose: - print("Fresh DocTR", flush=True) - from image_doctr import H2OOCRLoader - model_loaders['doctr'] = H2OOCRLoader() - model_loaders['doctr'].set_document_paths([file]) - docs1c = model_loaders['doctr'].load() - docs1c = [x for x in docs1c if x.page_content] - add_meta(docs1c, file, parser='H2OOCRLoader: %s' % 'DocTR') - # caption didn't set source, so fix-up meta - for doci in docs1c: - doci.metadata['source'] = doci.metadata.get('document_path', file) - 
doci.metadata['hashid'] = hash_file(doci.metadata['source']) - docs1.extend(docs1c) - if verbose: - print("END: DocTR", flush=True) - if enable_captions: - # BLIP - if verbose: - print("BEGIN: BLIP", flush=True) - if model_loaders['caption'] is not None and not isinstance(model_loaders['caption'], (str, bool)): - # assumes didn't fork into this process with joblib, else can deadlock - if verbose: - print("Reuse BLIP", flush=True) - model_loaders['caption'].load_model() - else: - if verbose: - print("Fresh BLIP", flush=True) - from image_captions import H2OImageCaptionLoader - model_loaders['caption'] = H2OImageCaptionLoader(caption_gpu=model_loaders['caption'] == 'gpu', - blip_model=captions_model, - blip_processor=captions_model) - model_loaders['caption'].set_image_paths([file]) - docs1c = model_loaders['caption'].load() - docs1c = [x for x in docs1c if x.page_content] - add_meta(docs1c, file, parser='H2OImageCaptionLoader: %s' % captions_model) - # caption didn't set source, so fix-up meta - for doci in docs1c: - doci.metadata['source'] = doci.metadata.get('image_path', file) - doci.metadata['hashid'] = hash_file(doci.metadata['source']) - docs1.extend(docs1c) - - if verbose: - print("END: BLIP", flush=True) - if enable_pix2struct: - # BLIP - if verbose: - print("BEGIN: Pix2Struct", flush=True) - if model_loaders['pix2struct'] is not None and not isinstance(model_loaders['pix2struct'], (str, bool)): - if verbose: - print("Reuse pix2struct", flush=True) - model_loaders['pix2struct'].load_model() - else: - if verbose: - print("Fresh pix2struct", flush=True) - from image_pix2struct import H2OPix2StructLoader - model_loaders['pix2struct'] = H2OPix2StructLoader() - model_loaders['pix2struct'].set_image_paths([file]) - docs1c = model_loaders['pix2struct'].load() - docs1c = [x for x in docs1c if x.page_content] - add_meta(docs1c, file, parser='H2OPix2StructLoader: %s' % model_loaders['pix2struct']) - # caption didn't set source, so fix-up meta - for doci in docs1c: - doci.metadata['source'] = doci.metadata.get('image_path', file) - doci.metadata['hashid'] = hash_file(doci.metadata['source']) - docs1.extend(docs1c) - if verbose: - print("END: Pix2Struct", flush=True) - doc1 = chunk_sources(docs1) - elif file.lower().endswith('.msg'): - raise RuntimeError("Not supported, GPL3 license") - # docs1 = OutlookMessageLoader(file).load() - # docs1[0].metadata['source'] = file - elif file.lower().endswith('.eml'): - try: - docs1 = UnstructuredEmailLoader(file).load() - add_meta(docs1, file, parser='UnstructuredEmailLoader') - doc1 = chunk_sources(docs1) - except ValueError as e: - if 'text/html content not found in email' in str(e): - pass - else: - raise - doc1 = [x for x in doc1 if x.page_content] - if len(doc1) == 0: - # e.g. 
plain/text dict key exists, but not - # doc1 = TextLoader(file, encoding="utf8").load() - docs1 = UnstructuredEmailLoader(file, content_source="text/plain").load() - docs1 = [x for x in docs1 if x.page_content] - add_meta(docs1, file, parser='UnstructuredEmailLoader text/plain') - doc1 = chunk_sources(docs1) - # elif file.lower().endswith('.gcsdir'): - # doc1 = GCSDirectoryLoader(project_name, bucket, prefix).load() - # elif file.lower().endswith('.gcsfile'): - # doc1 = GCSFileLoader(project_name, bucket, blob).load() - elif file.lower().endswith('.rst'): - with open(file, "r") as f: - doc1 = Document(page_content=str(f.read()), metadata={"source": file}) - add_meta(doc1, file, parser='f.read()') - doc1 = chunk_sources(doc1, language=Language.RST) - elif file.lower().endswith('.json'): - # 10k rows, 100 columns-like parts 4 bytes each - JSON_SIZE_LIMIT = int(os.getenv('JSON_SIZE_LIMIT', str(10 * 10 * 1024 * 10 * 4))) - if os.path.getsize(file) > JSON_SIZE_LIMIT: - raise ValueError( - "JSON file sizes > %s not supported for naive parsing and embedding, requires Agents enabled" % JSON_SIZE_LIMIT) - loader = JSONLoader( - file_path=file, - # jq_schema='.messages[].content', - jq_schema=jq_schema, - text_content=False, - metadata_func=json_metadata_func) - doc1 = loader.load() - add_meta(doc1, file, parser='JSONLoader: %s' % jq_schema) - fix_json_meta(doc1) - elif file.lower().endswith('.jsonl'): - loader = JSONLoader( - file_path=file, - # jq_schema='.messages[].content', - jq_schema=jq_schema, - json_lines=True, - text_content=False, - metadata_func=json_metadata_func) - doc1 = loader.load() - add_meta(doc1, file, parser='JSONLoader: %s' % jq_schema) - fix_json_meta(doc1) - elif file.lower().endswith('.pdf'): - # migration - if isinstance(use_pymupdf, bool): - if use_pymupdf == False: - use_pymupdf = 'off' - if use_pymupdf == True: - use_pymupdf = 'on' - if isinstance(use_unstructured_pdf, bool): - if use_unstructured_pdf == False: - use_unstructured_pdf = 'off' - if use_unstructured_pdf == True: - use_unstructured_pdf = 'on' - if isinstance(use_pypdf, bool): - if use_pypdf == False: - use_pypdf = 'off' - if use_pypdf == True: - use_pypdf = 'on' - if isinstance(enable_pdf_ocr, bool): - if enable_pdf_ocr == False: - enable_pdf_ocr = 'off' - if enable_pdf_ocr == True: - enable_pdf_ocr = 'on' - if isinstance(try_pdf_as_html, bool): - if try_pdf_as_html == False: - try_pdf_as_html = 'off' - if try_pdf_as_html == True: - try_pdf_as_html = 'on' - - doc1 = [] - tried_others = False - handled = False - did_pymupdf = False - did_unstructured = False - e = None - if have_pymupdf and (len(doc1) == 0 and use_pymupdf == 'auto' or use_pymupdf == 'on'): - # GPL, only use if installed - from langchain.document_loaders import PyMuPDFLoader - # load() still chunks by pages, but every page has title at start to help - try: - doc1a = PyMuPDFLoader(file).load() - did_pymupdf = True - except BaseException as e0: - doc1a = [] - print("PyMuPDFLoader: %s" % str(e0), flush=True) - e = e0 - # remove empty documents - handled |= len(doc1a) > 0 - doc1a = [x for x in doc1a if x.page_content] - doc1a = clean_doc(doc1a) - add_parser(doc1a, 'PyMuPDFLoader') - doc1.extend(doc1a) - if len(doc1) == 0 and use_unstructured_pdf == 'auto' or use_unstructured_pdf == 'on': - tried_others = True - try: - doc1a = UnstructuredPDFLoader(file).load() - did_unstructured = True - except BaseException as e0: - doc1a = [] - print("UnstructuredPDFLoader: %s" % str(e0), flush=True) - e = e0 - handled |= len(doc1a) > 0 - # remove empty 
documents - doc1a = [x for x in doc1a if x.page_content] - add_parser(doc1a, 'UnstructuredPDFLoader') - # seems to not need cleaning in most cases - doc1.extend(doc1a) - if len(doc1) == 0 and use_pypdf == 'auto' or use_pypdf == 'on': - tried_others = True - # open-source fallback - # load() still chunks by pages, but every page has title at start to help - try: - doc1a = PyPDFLoader(file).load() - except BaseException as e0: - doc1a = [] - print("PyPDFLoader: %s" % str(e0), flush=True) - e = e0 - handled |= len(doc1a) > 0 - # remove empty documents - doc1a = [x for x in doc1a if x.page_content] - doc1a = clean_doc(doc1a) - add_parser(doc1a, 'PyPDFLoader') - doc1.extend(doc1a) - if not did_pymupdf and ((have_pymupdf and len(doc1) == 0) and tried_others): - # try again in case only others used, but only if didn't already try (2nd part of and) - # GPL, only use if installed - from langchain.document_loaders import PyMuPDFLoader - # load() still chunks by pages, but every page has title at start to help - try: - doc1a = PyMuPDFLoader(file).load() - except BaseException as e0: - doc1a = [] - print("PyMuPDFLoader: %s" % str(e0), flush=True) - e = e0 - handled |= len(doc1a) > 0 - # remove empty documents - doc1a = [x for x in doc1a if x.page_content] - doc1a = clean_doc(doc1a) - add_parser(doc1a, 'PyMuPDFLoader2') - doc1.extend(doc1a) - did_pdf_ocr = False - if len(doc1) == 0 and (enable_pdf_ocr == 'auto' and enable_pdf_doctr != 'on') or enable_pdf_ocr == 'on': - did_pdf_ocr = True - # no did_unstructured condition here because here we do OCR, and before we did not - # try OCR in end since slowest, but works on pure image pages well - doc1a = UnstructuredPDFLoader(file, strategy='ocr_only').load() - handled |= len(doc1a) > 0 - # remove empty documents - doc1a = [x for x in doc1a if x.page_content] - add_parser(doc1a, 'UnstructuredPDFLoader ocr_only') - # seems to not need cleaning in most cases - doc1.extend(doc1a) - # Some PDFs return nothing or junk from PDFMinerLoader - if len(doc1) == 0 and enable_pdf_doctr == 'auto' or enable_pdf_doctr == 'on': - if verbose: - print("BEGIN: DocTR", flush=True) - if model_loaders['doctr'] is not None and not isinstance(model_loaders['doctr'], (str, bool)): - model_loaders['doctr'].load_model() - else: - from image_doctr import H2OOCRLoader - model_loaders['doctr'] = H2OOCRLoader() - model_loaders['doctr'].set_document_paths([file]) - doc1a = model_loaders['doctr'].load() - doc1a = [x for x in doc1a if x.page_content] - add_meta(doc1a, file, parser='H2OOCRLoader: %s' % 'DocTR') - handled |= len(doc1a) > 0 - # caption didn't set source, so fix-up meta - for doci in doc1a: - doci.metadata['source'] = doci.metadata.get('document_path', file) - doci.metadata['hashid'] = hash_file(doci.metadata['source']) - doc1.extend(doc1a) - if verbose: - print("END: DocTR", flush=True) - if try_pdf_as_html in ['auto', 'on']: - doc1a = try_as_html(file) - add_parser(doc1a, 'try_as_html') - doc1.extend(doc1a) - - if len(doc1) == 0: - # if literally nothing, show failed to parse so user knows, since unlikely nothing in PDF at all. 
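- # 'handled' is True if any of the PDF parsers tried above (PyMuPDF, Unstructured, PyPDF, OCR, DocTR,
- # HTML fallback) returned documents at all, so "parsed but no usable text" can be reported separately
- # from "every parser failed", in which case the last exception e is included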
- if handled: - raise ValueError("%s had no valid text, but meta data was parsed" % file) - else: - raise ValueError("%s had no valid text and no meta data was parsed: %s" % (file, str(e))) - add_meta(doc1, file, parser='pdf') - doc1 = chunk_sources(doc1) - elif file.lower().endswith('.csv'): - CSV_SIZE_LIMIT = int(os.getenv('CSV_SIZE_LIMIT', str(10 * 1024 * 10 * 4))) - if os.path.getsize(file) > CSV_SIZE_LIMIT: - raise ValueError( - "CSV file sizes > %s not supported for naive parsing and embedding, requires Agents enabled" % CSV_SIZE_LIMIT) - doc1 = CSVLoader(file).load() - add_meta(doc1, file, parser='CSVLoader') - if isinstance(doc1, list): - # each row is a Document, identify - [x.metadata.update(dict(chunk_id=chunk_id)) for chunk_id, x in enumerate(doc1)] - if db_type in ['chroma', 'chroma_old']: - # then separate summarize list - sdoc1 = clone_documents(doc1) - [x.metadata.update(dict(chunk_id=-1)) for chunk_id, x in enumerate(sdoc1)] - doc1 = sdoc1 + doc1 - elif file.lower().endswith('.py'): - doc1 = PythonLoader(file).load() - add_meta(doc1, file, parser='PythonLoader') - doc1 = chunk_sources(doc1, language=Language.PYTHON) - elif file.lower().endswith('.toml'): - doc1 = TomlLoader(file).load() - add_meta(doc1, file, parser='TomlLoader') - doc1 = chunk_sources(doc1) - elif file.lower().endswith('.xml'): - from langchain.document_loaders import UnstructuredXMLLoader - loader = UnstructuredXMLLoader(file_path=file) - doc1 = loader.load() - add_meta(doc1, file, parser='UnstructuredXMLLoader') - elif file.lower().endswith('.urls'): - with open(file, "r") as f: - urls = f.readlines() - # recurse - doc1 = path_to_docs_func(None, url=urls) - elif file.lower().endswith('.zip'): - with zipfile.ZipFile(file, 'r') as zip_ref: - # don't put into temporary path, since want to keep references to docs inside zip - # so just extract in path where - zip_ref.extractall(base_path) - # recurse - doc1 = path_to_docs_func(base_path) - elif file.lower().endswith('.gz') or file.lower().endswith('.gzip'): - if file.lower().endswith('.gz'): - de_file = file.lower().replace('.gz', '') - else: - de_file = file.lower().replace('.gzip', '') - with gzip.open(file, 'rb') as f_in: - with open(de_file, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - # recurse - doc1 = file_to_doc(de_file, - filei=filei, # single file, same file index as outside caller - base_path=base_path, verbose=verbose, fail_any_exception=fail_any_exception, - chunk=chunk, chunk_size=chunk_size, n_jobs=n_jobs, - is_url=is_url, is_txt=is_txt, - - # urls - use_unstructured=use_unstructured, - use_playwright=use_playwright, - use_selenium=use_selenium, - - # pdfs - use_pymupdf=use_pymupdf, - use_unstructured_pdf=use_unstructured_pdf, - use_pypdf=use_pypdf, - enable_pdf_ocr=enable_pdf_ocr, - enable_pdf_doctr=enable_pdf_doctr, - try_pdf_as_html=try_pdf_as_html, - - # images - enable_ocr=enable_ocr, - enable_doctr=enable_doctr, - enable_pix2struct=enable_pix2struct, - enable_captions=enable_captions, - captions_model=captions_model, - model_loaders=model_loaders, - - # json - jq_schema=jq_schema, - - headsize=headsize, - db_type=db_type, - selected_file_types=selected_file_types) - else: - raise RuntimeError("No file handler for %s" % os.path.basename(file)) - - # allow doc1 to be list or not. 
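- # a bare Document (or a single-element list) has not necessarily been chunked yet, so chunk it here;
- # longer lists are trusted to already carry correct chunk_id metadata from the per-format handling above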
- if not isinstance(doc1, list): - # If not list, did not chunk yet, so chunk now - docs = chunk_sources([doc1]) - elif isinstance(doc1, list) and len(doc1) == 1: - # if list of length one, don't trust and chunk it, chunk_id's will still be correct if repeat - docs = chunk_sources(doc1) - else: - docs = doc1 - - assert isinstance(docs, list) - return docs - - -def path_to_doc1(file, - filei=0, - verbose=False, fail_any_exception=False, return_file=True, - chunk=True, chunk_size=512, - n_jobs=-1, - is_url=False, is_txt=False, - - # urls - use_unstructured=True, - use_playwright=False, - use_selenium=False, - - # pdfs - use_pymupdf='auto', - use_unstructured_pdf='auto', - use_pypdf='auto', - enable_pdf_ocr='auto', - enable_pdf_doctr='auto', - try_pdf_as_html='auto', - - # images - enable_ocr=False, - enable_doctr=False, - enable_pix2struct=False, - enable_captions=True, - captions_model=None, - model_loaders=None, - - # json - jq_schema='.[]', - - db_type=None, - selected_file_types=None): - assert db_type is not None - if verbose: - if is_url: - print("Ingesting URL: %s" % file, flush=True) - elif is_txt: - print("Ingesting Text: %s" % file, flush=True) - else: - print("Ingesting file: %s" % file, flush=True) - res = None - try: - # don't pass base_path=path, would infinitely recurse - res = file_to_doc(file, - filei=filei, - base_path=None, verbose=verbose, fail_any_exception=fail_any_exception, - chunk=chunk, chunk_size=chunk_size, - n_jobs=n_jobs, - is_url=is_url, is_txt=is_txt, - - # urls - use_unstructured=use_unstructured, - use_playwright=use_playwright, - use_selenium=use_selenium, - - # pdfs - use_pymupdf=use_pymupdf, - use_unstructured_pdf=use_unstructured_pdf, - use_pypdf=use_pypdf, - enable_pdf_ocr=enable_pdf_ocr, - enable_pdf_doctr=enable_pdf_doctr, - try_pdf_as_html=try_pdf_as_html, - - # images - enable_ocr=enable_ocr, - enable_doctr=enable_doctr, - enable_pix2struct=enable_pix2struct, - enable_captions=enable_captions, - captions_model=captions_model, - model_loaders=model_loaders, - - # json - jq_schema=jq_schema, - - db_type=db_type, - selected_file_types=selected_file_types) - except BaseException as e: - print("Failed to ingest %s due to %s" % (file, traceback.format_exc())) - if fail_any_exception: - raise - else: - exception_doc = Document( - page_content='', - metadata={"source": file, "exception": '%s Exception: %s' % (file, str(e)), - "traceback": traceback.format_exc()}) - res = [exception_doc] - if verbose: - if is_url: - print("DONE Ingesting URL: %s" % file, flush=True) - elif is_txt: - print("DONE Ingesting Text: %s" % file, flush=True) - else: - print("DONE Ingesting file: %s" % file, flush=True) - if return_file: - base_tmp = "temp_path_to_doc1" - if not os.path.isdir(base_tmp): - base_tmp = makedirs(base_tmp, exist_ok=True, tmp_ok=True, use_base=True) - filename = os.path.join(base_tmp, str(uuid.uuid4()) + ".tmp.pickle") - with open(filename, 'wb') as f: - pickle.dump(res, f) - return filename - return res - - -def path_to_docs(path_or_paths, verbose=False, fail_any_exception=False, n_jobs=-1, - chunk=True, chunk_size=512, - url=None, text=None, - - # urls - use_unstructured=True, - use_playwright=False, - use_selenium=False, - - # pdfs - use_pymupdf='auto', - use_unstructured_pdf='auto', - use_pypdf='auto', - enable_pdf_ocr='auto', - enable_pdf_doctr='auto', - try_pdf_as_html='auto', - - # images - enable_ocr=False, - enable_doctr=False, - enable_pix2struct=False, - enable_captions=True, - captions_model=None, - - caption_loader=None, - doctr_loader=None, - 
pix2struct_loader=None, - - # json - jq_schema='.[]', - - existing_files=[], - existing_hash_ids={}, - db_type=None, - selected_file_types=None, - ): - if verbose: - print("BEGIN Consuming path_or_paths=%s url=%s text=%s" % (path_or_paths, url, text), flush=True) - if selected_file_types is not None: - non_image_types1 = [x for x in non_image_types if x in selected_file_types] - image_types1 = [x for x in image_types if x in selected_file_types] - else: - non_image_types1 = non_image_types.copy() - image_types1 = image_types.copy() - - assert db_type is not None - # path_or_paths could be str, list, tuple, generator - globs_image_types = [] - globs_non_image_types = [] - if not path_or_paths and not url and not text: - return [] - elif url: - url = get_list_or_str(url) - globs_non_image_types = url if isinstance(url, (list, tuple, types.GeneratorType)) else [url] - elif text: - globs_non_image_types = text if isinstance(text, (list, tuple, types.GeneratorType)) else [text] - elif isinstance(path_or_paths, str) and os.path.isdir(path_or_paths): - # single path, only consume allowed files - path = path_or_paths - # Below globs should match patterns in file_to_doc() - [globs_image_types.extend(glob.glob(os.path.join(path, "./**/*.%s" % ftype), recursive=True)) - for ftype in image_types1] - globs_image_types = [os.path.normpath(x) for x in globs_image_types] - [globs_non_image_types.extend(glob.glob(os.path.join(path, "./**/*.%s" % ftype), recursive=True)) - for ftype in non_image_types1] - globs_non_image_types = [os.path.normpath(x) for x in globs_non_image_types] - else: - if isinstance(path_or_paths, str): - if os.path.isfile(path_or_paths) or os.path.isdir(path_or_paths): - path_or_paths = [path_or_paths] - else: - # path was deleted etc. - return [] - # list/tuple of files (consume what can, and exception those that selected but cannot consume so user knows) - assert isinstance(path_or_paths, (list, tuple, types.GeneratorType)), \ - "Wrong type for path_or_paths: %s %s" % (path_or_paths, type(path_or_paths)) - # reform out of allowed types - globs_image_types.extend( - flatten_list([[os.path.normpath(x) for x in path_or_paths if x.endswith(y)] for y in image_types1])) - # could do below: - # globs_non_image_types = flatten_list([[x for x in path_or_paths if x.endswith(y)] for y in non_image_types1]) - # But instead, allow fail so can collect unsupported too - set_globs_image_types = set(globs_image_types) - globs_non_image_types.extend([os.path.normpath(x) for x in path_or_paths if x not in set_globs_image_types]) - - # filter out any files to skip (e.g. if already processed them) - # this is easy, but too aggressive in case a file changed, so parent probably passed existing_files=[] - assert not existing_files, "DEV: assume not using this approach" - if existing_files: - set_skip_files = set(existing_files) - globs_image_types = [x for x in globs_image_types if x not in set_skip_files] - globs_non_image_types = [x for x in globs_non_image_types if x not in set_skip_files] - if existing_hash_ids: - # assume consistent with add_meta() use of hash_file(file) - # also assume consistent with get_existing_hash_ids for dict creation - # assume hashable values - existing_hash_ids_set = set(existing_hash_ids.items()) - hash_ids_all_image = set({x: hash_file(x) for x in globs_image_types}.items()) - hash_ids_all_non_image = set({x: hash_file(x) for x in globs_non_image_types}.items()) - # don't use symmetric diff. 
If file is gone, ignore and don't remove or something - # just consider existing files (key) having new hash or not (value) - new_files_image = set(dict(hash_ids_all_image - existing_hash_ids_set).keys()) - new_files_non_image = set(dict(hash_ids_all_non_image - existing_hash_ids_set).keys()) - globs_image_types = [x for x in globs_image_types if x in new_files_image] - globs_non_image_types = [x for x in globs_non_image_types if x in new_files_non_image] - - # could use generator, but messes up metadata handling in recursive case - if caption_loader and not isinstance(caption_loader, (bool, str)) and caption_loader.device != 'cpu' or \ - get_device() == 'cuda': - # to avoid deadlocks, presume was preloaded and so can't fork due to cuda context - # get_device() == 'cuda' because presume faster to process image from (temporarily) preloaded model - n_jobs_image = 1 - else: - n_jobs_image = n_jobs - if enable_doctr or enable_pdf_doctr in [True, 'auto', 'on']: - if doctr_loader and not isinstance(doctr_loader, (bool, str)) and doctr_loader.device != 'cpu': - # can't fork cuda context - n_jobs = 1 - - return_file = True # local choice - is_url = url is not None - is_txt = text is not None - model_loaders = dict(caption=caption_loader, - doctr=doctr_loader, - pix2struct=pix2struct_loader) - model_loaders0 = model_loaders.copy() - kwargs = dict(verbose=verbose, fail_any_exception=fail_any_exception, - return_file=return_file, - chunk=chunk, chunk_size=chunk_size, - n_jobs=n_jobs, - is_url=is_url, - is_txt=is_txt, - - # urls - use_unstructured=use_unstructured, - use_playwright=use_playwright, - use_selenium=use_selenium, - - # pdfs - use_pymupdf=use_pymupdf, - use_unstructured_pdf=use_unstructured_pdf, - use_pypdf=use_pypdf, - enable_pdf_ocr=enable_pdf_ocr, - enable_pdf_doctr=enable_pdf_doctr, - try_pdf_as_html=try_pdf_as_html, - - # images - enable_ocr=enable_ocr, - enable_doctr=enable_doctr, - enable_pix2struct=enable_pix2struct, - enable_captions=enable_captions, - captions_model=captions_model, - model_loaders=model_loaders, - - # json - jq_schema=jq_schema, - - db_type=db_type, - selected_file_types=selected_file_types, - ) - if n_jobs != 1 and len(globs_non_image_types) > 1: - # avoid nesting, e.g. upload 1 zip and then inside many files - # harder to handle if upload many zips with many files, inner parallel one will be disabled by joblib - documents = ProgressParallel(n_jobs=n_jobs, verbose=10 if verbose else 0, backend='multiprocessing')( - delayed(path_to_doc1)(file, filei=filei, **kwargs) for filei, file in enumerate(globs_non_image_types) - ) - else: - documents = [path_to_doc1(file, filei=filei, **kwargs) for filei, file in - enumerate(tqdm(globs_non_image_types))] - - # do images separately since can't fork after cuda in parent, so can't be parallel - if n_jobs_image != 1 and len(globs_image_types) > 1: - # avoid nesting, e.g. 
upload 1 zip and then inside many files - # harder to handle if upload many zips with many files, inner parallel one will be disabled by joblib - image_documents = ProgressParallel(n_jobs=n_jobs, verbose=10 if verbose else 0, backend='multiprocessing')( - delayed(path_to_doc1)(file, filei=filei, **kwargs) for filei, file in enumerate(globs_image_types) - ) - else: - image_documents = [path_to_doc1(file, filei=filei, **kwargs) for filei, file in - enumerate(tqdm(globs_image_types))] - - # unload loaders (image loaders, includes enable_pdf_doctr that uses same loader) - for name, loader in model_loaders.items(): - loader0 = model_loaders0[name] - real_model_initial = loader0 is not None and not isinstance(loader0, (str, bool)) - real_model_final = model_loaders[name] is not None and not isinstance(model_loaders[name], (str, bool)) - if not real_model_initial and real_model_final: - # clear off GPU newly added model - model_loaders[name].unload_model() - - # add image docs in - documents += image_documents - - if return_file: - # then documents really are files - files = documents.copy() - documents = [] - for fil in files: - with open(fil, 'rb') as f: - documents.extend(pickle.load(f)) - # remove temp pickle - remove(fil) - else: - documents = reduce(concat, documents) - - if verbose: - print("END consuming path_or_paths=%s url=%s text=%s" % (path_or_paths, url, text), flush=True) - return documents - - -def prep_langchain(persist_directory, - load_db_if_exists, - db_type, use_openai_embedding, - langchain_mode, langchain_mode_paths, langchain_mode_types, - hf_embedding_model, - migrate_embedding_model, - auto_migrate_db, - n_jobs=-1, kwargs_make_db={}, - verbose=False): - """ - do prep first time, involving downloads - # FIXME: Add github caching then add here - :return: - """ - if os.getenv("HARD_ASSERTS"): - assert langchain_mode not in ['MyData'], "Should not prep scratch/personal data" - - if langchain_mode in langchain_modes_intrinsic: - return None - - db_dir_exists = os.path.isdir(persist_directory) - user_path = langchain_mode_paths.get(langchain_mode) - - if db_dir_exists and user_path is None: - if verbose: - print("Prep: persist_directory=%s exists, using" % persist_directory, flush=True) - db, use_openai_embedding, hf_embedding_model = \ - get_existing_db(None, persist_directory, load_db_if_exists, - db_type, use_openai_embedding, - langchain_mode, langchain_mode_paths, langchain_mode_types, - hf_embedding_model, migrate_embedding_model, auto_migrate_db, - n_jobs=n_jobs) - else: - if db_dir_exists and user_path is not None: - if verbose: - print("Prep: persist_directory=%s exists, user_path=%s passed, adding any changed or new documents" % ( - persist_directory, user_path), flush=True) - elif not db_dir_exists: - if verbose: - print("Prep: persist_directory=%s does not exist, regenerating" % persist_directory, flush=True) - db = None - if langchain_mode in ['DriverlessAI docs']: - # FIXME: Could also just use dai_docs.pickle directly and upload that - get_dai_docs(from_hf=True) - - if langchain_mode in ['wiki']: - get_wiki_sources(first_para=kwargs_make_db['first_para'], text_limit=kwargs_make_db['text_limit']) - - langchain_kwargs = kwargs_make_db.copy() - langchain_kwargs.update(locals()) - db, num_new_sources, new_sources_metadata = make_db(**langchain_kwargs) - - return db - - -import posthog - -posthog.disabled = True - - -class FakeConsumer(object): - def __init__(self, *args, **kwargs): - pass - - def run(self): - pass - - def pause(self): - pass - - def upload(self): - 
pass - - def next(self): - pass - - def request(self, batch): - pass - - -posthog.Consumer = FakeConsumer - - -def check_update_chroma_embedding(db, - db_type, - use_openai_embedding, - hf_embedding_model, migrate_embedding_model, auto_migrate_db, - langchain_mode, langchain_mode_paths, langchain_mode_types, - n_jobs=-1): - changed_db = False - embed_tuple = load_embed(db=db) - if embed_tuple not in [(True, use_openai_embedding, hf_embedding_model), - (False, use_openai_embedding, hf_embedding_model)]: - print("Detected new embedding %s vs. %s %s, updating db: %s" % ( - use_openai_embedding, hf_embedding_model, embed_tuple, langchain_mode), flush=True) - # handle embedding changes - db_get = get_documents(db) - sources = [Document(page_content=result[0], metadata=result[1] or {}) - for result in zip(db_get['documents'], db_get['metadatas'])] - # delete index, has to be redone - persist_directory = db._persist_directory - shutil.move(persist_directory, persist_directory + "_" + str(uuid.uuid4()) + ".bak") - assert db_type in ['chroma', 'chroma_old'] - load_db_if_exists = False - db = get_db(sources, use_openai_embedding=use_openai_embedding, db_type=db_type, - persist_directory=persist_directory, load_db_if_exists=load_db_if_exists, - langchain_mode=langchain_mode, - langchain_mode_paths=langchain_mode_paths, - langchain_mode_types=langchain_mode_types, - collection_name=None, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - n_jobs=n_jobs, - ) - changed_db = True - print("Done updating db for new embedding: %s" % langchain_mode, flush=True) - - return db, changed_db - - -def migrate_meta_func(db, langchain_mode): - changed_db = False - db_get = get_documents(db) - # just check one doc - if len(db_get['metadatas']) > 0 and 'chunk_id' not in db_get['metadatas'][0]: - print("Detected old metadata, adding additional information", flush=True) - t0 = time.time() - # handle meta changes - [x.update(dict(chunk_id=x.get('chunk_id', 0))) for x in db_get['metadatas']] - client_collection = db._client.get_collection(name=db._collection.name, - embedding_function=db._collection._embedding_function) - client_collection.update(ids=db_get['ids'], metadatas=db_get['metadatas']) - # check - db_get = get_documents(db) - assert 'chunk_id' in db_get['metadatas'][0], "Failed to add meta" - changed_db = True - print("Done updating db for new meta: %s in %s seconds" % (langchain_mode, time.time() - t0), flush=True) - - return db, changed_db - - -def get_existing_db(db, persist_directory, - load_db_if_exists, db_type, use_openai_embedding, - langchain_mode, langchain_mode_paths, langchain_mode_types, - hf_embedding_model, - migrate_embedding_model, - auto_migrate_db=False, - verbose=False, check_embedding=True, migrate_meta=True, - n_jobs=-1): - if load_db_if_exists and db_type in ['chroma', 'chroma_old'] and os.path.isdir(persist_directory): - if os.path.isfile(os.path.join(persist_directory, 'chroma.sqlite3')): - must_migrate = False - elif os.path.isdir(os.path.join(persist_directory, 'index')): - must_migrate = True - else: - return db, use_openai_embedding, hf_embedding_model - chroma_settings = dict(is_persistent=True) - use_chromamigdb = False - if must_migrate: - if auto_migrate_db: - print("Detected chromadb<0.4 database, require migration, doing now....", flush=True) - from chroma_migrate.import_duckdb import migrate_from_duckdb - import chromadb - api = chromadb.PersistentClient(path=persist_directory) - did_migration = 
migrate_from_duckdb(api, persist_directory) - assert did_migration, "Failed to migrate chroma collection at %s, see https://docs.trychroma.com/migration for CLI tool" % persist_directory - elif have_chromamigdb: - print( - "Detected chroma<0.4 database but --auto_migrate_db=False, but detected chromamigdb package, so using old database that still requires duckdb", - flush=True) - chroma_settings = dict(chroma_db_impl="duckdb+parquet") - use_chromamigdb = True - else: - raise ValueError( - "Detected chromadb<0.4 database, require migration, but did not detect chromamigdb package or did not choose auto_migrate_db=False (see FAQ.md)") - - if db is None: - if verbose: - print("DO Loading db: %s" % langchain_mode, flush=True) - got_embedding, use_openai_embedding0, hf_embedding_model0 = load_embed(persist_directory=persist_directory) - if got_embedding: - use_openai_embedding, hf_embedding_model = use_openai_embedding0, hf_embedding_model0 - embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model) - import logging - logging.getLogger("chromadb").setLevel(logging.ERROR) - if use_chromamigdb: - from chromamigdb.config import Settings - chroma_class = ChromaMig - else: - from chromadb.config import Settings - chroma_class = Chroma - client_settings = Settings(anonymized_telemetry=False, - **chroma_settings, - persist_directory=persist_directory) - db = chroma_class(persist_directory=persist_directory, embedding_function=embedding, - collection_name=langchain_mode.replace(' ', '_'), - client_settings=client_settings) - try: - db.similarity_search('') - except BaseException as e: - # migration when no embed_info - if 'Dimensionality of (768) does not match index dimensionality (384)' in str(e) or \ - 'Embedding dimension 768 does not match collection dimensionality 384' in str(e): - hf_embedding_model = "sentence-transformers/all-MiniLM-L6-v2" - embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model) - db = chroma_class(persist_directory=persist_directory, embedding_function=embedding, - collection_name=langchain_mode.replace(' ', '_'), - client_settings=client_settings) - # should work now, let fail if not - db.similarity_search('') - save_embed(db, use_openai_embedding, hf_embedding_model) - else: - raise - - if verbose: - print("DONE Loading db: %s" % langchain_mode, flush=True) - else: - if not migrate_embedding_model: - # OVERRIDE embedding choices if could load embedding info when not migrating - got_embedding, use_openai_embedding, hf_embedding_model = load_embed(db=db) - if verbose: - print("USING already-loaded db: %s" % langchain_mode, flush=True) - if check_embedding: - db_trial, changed_db = check_update_chroma_embedding(db, - db_type, - use_openai_embedding, - hf_embedding_model, - migrate_embedding_model, - auto_migrate_db, - langchain_mode, - langchain_mode_paths, - langchain_mode_types, - n_jobs=n_jobs) - if changed_db: - db = db_trial - # only call persist if really changed db, else takes too long for large db - if db is not None: - db.persist() - clear_embedding(db) - save_embed(db, use_openai_embedding, hf_embedding_model) - if migrate_meta and db is not None: - db_trial, changed_db = migrate_meta_func(db, langchain_mode) - if changed_db: - db = db_trial - return db, use_openai_embedding, hf_embedding_model - return db, use_openai_embedding, hf_embedding_model - - -def clear_embedding(db): - if db is None: - return - # don't keep on GPU, wastes memory, push back onto CPU and only put back on GPU once again embed - try: - 
if hasattr(db._embedding_function, 'client') and hasattr(db._embedding_function.client, 'cpu'): - # only push back to CPU if each db/user has own embedding model, else if shared share on GPU - if hasattr(db._embedding_function.client, 'preload') and not db._embedding_function.client.preload: - db._embedding_function.client.cpu() - clear_torch_cache() - except RuntimeError as e: - print("clear_embedding error: %s" % ''.join(traceback.format_tb(e.__traceback__)), flush=True) - - -def make_db(**langchain_kwargs): - func_names = list(inspect.signature(_make_db).parameters) - missing_kwargs = [x for x in func_names if x not in langchain_kwargs] - defaults_db = {k: v.default for k, v in dict(inspect.signature(run_qa_db).parameters).items()} - for k in missing_kwargs: - if k in defaults_db: - langchain_kwargs[k] = defaults_db[k] - # final check for missing - missing_kwargs = [x for x in func_names if x not in langchain_kwargs] - assert not missing_kwargs, "Missing kwargs for make_db: %s" % missing_kwargs - # only keep actual used - langchain_kwargs = {k: v for k, v in langchain_kwargs.items() if k in func_names} - return _make_db(**langchain_kwargs) - - -embed_lock_name = 'embed.lock' - - -def get_embed_lock_file(db, persist_directory=None): - if hasattr(db, '_persist_directory') or persist_directory: - if persist_directory is None: - persist_directory = db._persist_directory - check_persist_directory(persist_directory) - base_path = os.path.join('locks', persist_directory) - base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True) - lock_file = os.path.join(base_path, embed_lock_name) - makedirs(os.path.dirname(lock_file)) - return lock_file - return None - - -def save_embed(db, use_openai_embedding, hf_embedding_model): - if hasattr(db, '_persist_directory'): - persist_directory = db._persist_directory - lock_file = get_embed_lock_file(db) - with filelock.FileLock(lock_file): - embed_info_file = os.path.join(persist_directory, 'embed_info') - with open(embed_info_file, 'wb') as f: - if isinstance(hf_embedding_model, str): - hf_embedding_model_save = hf_embedding_model - elif hasattr(hf_embedding_model, 'model_name'): - hf_embedding_model_save = hf_embedding_model.model_name - elif isinstance(hf_embedding_model, dict) and 'name' in hf_embedding_model: - hf_embedding_model_save = hf_embedding_model['name'] - elif isinstance(hf_embedding_model, dict) and 'name' in hf_embedding_model: - if os.getenv('HARD_ASSERTS'): - # unexpected in testing or normally - raise RuntimeError("HERE") - hf_embedding_model_save = 'hkunlp/instructor-large' - pickle.dump((use_openai_embedding, hf_embedding_model_save), f) - return use_openai_embedding, hf_embedding_model - - -def load_embed(db=None, persist_directory=None): - if hasattr(db, 'embeddings') and hasattr(db.embeddings, 'model_name'): - hf_embedding_model = db.embeddings.model_name if 'openai' not in db.embeddings.model_name.lower() else None - use_openai_embedding = hf_embedding_model is None - save_embed(db, use_openai_embedding, hf_embedding_model) - return True, use_openai_embedding, hf_embedding_model - if persist_directory is None: - persist_directory = db._persist_directory - embed_info_file = os.path.join(persist_directory, 'embed_info') - if os.path.isfile(embed_info_file): - lock_file = get_embed_lock_file(db, persist_directory=persist_directory) - with filelock.FileLock(lock_file): - with open(embed_info_file, 'rb') as f: - try: - use_openai_embedding, hf_embedding_model = pickle.load(f) - if not isinstance(hf_embedding_model, 
str): - # work-around bug introduced here: https://github.com/h2oai/h2ogpt/commit/54c4414f1ce3b5b7c938def651c0f6af081c66de - hf_embedding_model = 'hkunlp/instructor-large' - # fix file - save_embed(db, use_openai_embedding, hf_embedding_model) - got_embedding = True - except EOFError: - use_openai_embedding, hf_embedding_model = False, 'hkunlp/instructor-large' - got_embedding = False - if os.getenv('HARD_ASSERTS'): - # unexpected in testing or normally - raise - else: - # migration, assume defaults - use_openai_embedding, hf_embedding_model = False, "sentence-transformers/all-MiniLM-L6-v2" - got_embedding = False - assert isinstance(hf_embedding_model, str) - return got_embedding, use_openai_embedding, hf_embedding_model - - -def get_persist_directory(langchain_mode, langchain_type=None, db1s=None, dbs=None): - if langchain_mode in [LangChainMode.DISABLED.value, LangChainMode.LLM.value]: - # not None so join works but will fail to find db - return '', langchain_type - - userid = get_userid_direct(db1s) - username = get_username_direct(db1s) - - # sanity for bad code - assert userid != 'None' - assert username != 'None' - - dirid = username or userid - if langchain_type == LangChainTypes.SHARED.value and not dirid: - dirid = './' # just to avoid error - if langchain_type == LangChainTypes.PERSONAL.value and not dirid: - # e.g. from client when doing transient calls with MyData - if db1s is None: - # just trick to get filled locally - db1s = {LangChainMode.MY_DATA.value: [None, None, None]} - set_userid_direct(db1s, str(uuid.uuid4()), str(uuid.uuid4())) - userid = get_userid_direct(db1s) - username = get_username_direct(db1s) - dirid = username or userid - langchain_type = LangChainTypes.PERSONAL.value - - # deal with existing locations - user_base_dir = os.getenv('USERS_BASE_DIR', 'users') - persist_directory = os.path.join(user_base_dir, dirid, 'db_dir_%s' % langchain_mode) - if userid and \ - (os.path.isdir(persist_directory) or - db1s is not None and langchain_mode in db1s or - langchain_type == LangChainTypes.PERSONAL.value): - langchain_type = LangChainTypes.PERSONAL.value - persist_directory = makedirs(persist_directory, use_base=True) - check_persist_directory(persist_directory) - return persist_directory, langchain_type - - persist_directory = 'db_dir_%s' % langchain_mode - if (os.path.isdir(persist_directory) or - dbs is not None and langchain_mode in dbs or - langchain_type == LangChainTypes.SHARED.value): - # ensure consistent - langchain_type = LangChainTypes.SHARED.value - persist_directory = makedirs(persist_directory, use_base=True) - check_persist_directory(persist_directory) - return persist_directory, langchain_type - - # dummy return for prep_langchain() or full personal space - base_others = 'db_nonusers' - persist_directory = os.path.join(base_others, 'db_dir_%s' % str(uuid.uuid4())) - persist_directory = makedirs(persist_directory, use_base=True) - langchain_type = LangChainTypes.PERSONAL.value - - check_persist_directory(persist_directory) - return persist_directory, langchain_type - - -def check_persist_directory(persist_directory): - # deal with some cases when see intrinsic names being used as shared - for langchain_mode in langchain_modes_intrinsic: - if persist_directory == 'db_dir_%s' % langchain_mode: - raise RuntimeError("Illegal access to %s" % persist_directory) - - -def _make_db(use_openai_embedding=False, - hf_embedding_model=None, - migrate_embedding_model=False, - auto_migrate_db=False, - first_para=False, text_limit=None, - chunk=True, chunk_size=512, 
- - # urls - use_unstructured=True, - use_playwright=False, - use_selenium=False, - - # pdfs - use_pymupdf='auto', - use_unstructured_pdf='auto', - use_pypdf='auto', - enable_pdf_ocr='auto', - enable_pdf_doctr='auto', - try_pdf_as_html='auto', - - # images - enable_ocr=False, - enable_doctr=False, - enable_pix2struct=False, - enable_captions=True, - captions_model=None, - caption_loader=None, - doctr_loader=None, - pix2struct_loader=None, - - # json - jq_schema='.[]', - - langchain_mode=None, - langchain_mode_paths=None, - langchain_mode_types=None, - db_type='faiss', - load_db_if_exists=True, - db=None, - n_jobs=-1, - verbose=False): - assert hf_embedding_model is not None - user_path = langchain_mode_paths.get(langchain_mode) - langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value) - persist_directory, langchain_type = get_persist_directory(langchain_mode, langchain_type=langchain_type) - langchain_mode_types[langchain_mode] = langchain_type - # see if can get persistent chroma db - db_trial, use_openai_embedding, hf_embedding_model = \ - get_existing_db(db, persist_directory, load_db_if_exists, db_type, - use_openai_embedding, - langchain_mode, langchain_mode_paths, langchain_mode_types, - hf_embedding_model, migrate_embedding_model, auto_migrate_db, verbose=verbose, - n_jobs=n_jobs) - if db_trial is not None: - db = db_trial - - sources = [] - if not db: - chunk_sources = functools.partial(_chunk_sources, chunk=chunk, chunk_size=chunk_size, db_type=db_type) - if langchain_mode in ['wiki_full']: - from read_wiki_full import get_all_documents - small_test = None - print("Generating new wiki", flush=True) - sources1 = get_all_documents(small_test=small_test, n_jobs=os.cpu_count() // 2) - print("Got new wiki", flush=True) - sources1 = chunk_sources(sources1, chunk=chunk) - print("Chunked new wiki", flush=True) - sources.extend(sources1) - elif langchain_mode in ['wiki']: - sources1 = get_wiki_sources(first_para=first_para, text_limit=text_limit) - sources1 = chunk_sources(sources1, chunk=chunk) - sources.extend(sources1) - elif langchain_mode in ['github h2oGPT']: - # sources = get_github_docs("dagster-io", "dagster") - sources1 = get_github_docs("h2oai", "h2ogpt") - # FIXME: always chunk for now - sources1 = chunk_sources(sources1) - sources.extend(sources1) - elif langchain_mode in ['DriverlessAI docs']: - sources1 = get_dai_docs(from_hf=True) - # FIXME: DAI docs are already chunked well, should only chunk more if over limit - sources1 = chunk_sources(sources1, chunk=False) - sources.extend(sources1) - if user_path: - # UserData or custom, which has to be from user's disk - if db is not None: - # NOTE: Ignore file names for now, only go by hash ids - # existing_files = get_existing_files(db) - existing_files = [] - existing_hash_ids = get_existing_hash_ids(db) - else: - # pretend no existing files so won't filter - existing_files = [] - existing_hash_ids = [] - # chunk internally for speed over multiple docs - # FIXME: If first had old Hash=None and switch embeddings, - # then re-embed, and then hit here and reload so have hash, and then re-embed. 
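# --- Editor's aside (not part of the original file): the incremental-update path here
# relies on comparing a {path: content_hash} map against the hashes already stored in
# the db metadata (see the existing_hash_ids handling in path_to_docs above), so only
# new or changed files get re-ingested and files that disappeared are simply ignored.
# A minimal sketch of that idea follows; hash_file is assumed to be any stable content
# hash (the real helper lives elsewhere in this repo), and select_new_or_changed is a
# hypothetical name used only for illustration.
import hashlib
import os


def hash_file(path, blocksize=1 << 20):
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(blocksize), b''):
            h.update(block)
    return h.hexdigest()


def select_new_or_changed(paths, existing_hash_ids):
    # existing_hash_ids: {normalized path: hash} recovered from prior chunk metadata
    existing = set(existing_hash_ids.items())
    current = {os.path.normpath(p): hash_file(p) for p in paths}
    # items present only in `current` are either brand-new paths or paths whose hash
    # changed; deleted files stay in the db untouched, as in the code above
    return sorted(dict(set(current.items()) - existing).keys())
# --- end aside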
- sources1 = path_to_docs(user_path, n_jobs=n_jobs, chunk=chunk, chunk_size=chunk_size, - # urls - use_unstructured=use_unstructured, - use_playwright=use_playwright, - use_selenium=use_selenium, - - # pdfs - use_pymupdf=use_pymupdf, - use_unstructured_pdf=use_unstructured_pdf, - use_pypdf=use_pypdf, - enable_pdf_ocr=enable_pdf_ocr, - enable_pdf_doctr=enable_pdf_doctr, - try_pdf_as_html=try_pdf_as_html, - - # images - enable_ocr=enable_ocr, - enable_doctr=enable_doctr, - enable_pix2struct=enable_pix2struct, - enable_captions=enable_captions, - captions_model=captions_model, - caption_loader=caption_loader, - doctr_loader=doctr_loader, - pix2struct_loader=pix2struct_loader, - - # json - jq_schema=jq_schema, - - existing_files=existing_files, existing_hash_ids=existing_hash_ids, - db_type=db_type) - new_metadata_sources = set([x.metadata['source'] for x in sources1]) - if new_metadata_sources: - if os.getenv('NO_NEW_FILES') is not None: - raise RuntimeError("Expected no new files! %s" % new_metadata_sources) - print("Loaded %s new files as sources to add to %s" % (len(new_metadata_sources), langchain_mode), - flush=True) - if verbose: - print("Files added: %s" % '\n'.join(new_metadata_sources), flush=True) - sources.extend(sources1) - if len(sources) > 0 and os.getenv('NO_NEW_FILES') is not None: - raise RuntimeError("Expected no new files! %s" % langchain_mode) - if len(sources) == 0 and os.getenv('SHOULD_NEW_FILES') is not None: - raise RuntimeError("Expected new files! %s" % langchain_mode) - print("Loaded %s sources for potentially adding to %s" % (len(sources), langchain_mode), flush=True) - - # see if got sources - if not sources: - if verbose: - if db is not None: - print("langchain_mode %s has no new sources, nothing to add to db" % langchain_mode, flush=True) - else: - print("langchain_mode %s has no sources, not making new db" % langchain_mode, flush=True) - return db, 0, [] - if verbose: - if db is not None: - print("Generating db", flush=True) - else: - print("Adding to db", flush=True) - if not db: - if sources: - db = get_db(sources, use_openai_embedding=use_openai_embedding, db_type=db_type, - persist_directory=persist_directory, - langchain_mode=langchain_mode, - langchain_mode_paths=langchain_mode_paths, - langchain_mode_types=langchain_mode_types, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - n_jobs=n_jobs) - if verbose: - print("Generated db", flush=True) - elif langchain_mode not in langchain_modes_intrinsic: - print("Did not generate db for %s since no sources" % langchain_mode, flush=True) - new_sources_metadata = [x.metadata for x in sources] - elif user_path is not None: - print("Existing db, potentially adding %s sources from user_path=%s" % (len(sources), user_path), flush=True) - db, num_new_sources, new_sources_metadata = add_to_db(db, sources, db_type=db_type, - use_openai_embedding=use_openai_embedding, - hf_embedding_model=hf_embedding_model) - print("Existing db, added %s new sources from user_path=%s" % (num_new_sources, user_path), flush=True) - else: - new_sources_metadata = [x.metadata for x in sources] - - return db, len(new_sources_metadata), new_sources_metadata - - -def get_metadatas(db): - metadatas = [] - from langchain.vectorstores import FAISS - if isinstance(db, FAISS): - metadatas = [v.metadata for k, v in db.docstore._dict.items()] - elif isinstance(db, Chroma) or isinstance(db, ChromaMig) or ChromaMig.__name__ in str(db): - metadatas = 
get_documents(db)['metadatas'] - elif db is not None: - # FIXME: Hack due to https://github.com/weaviate/weaviate/issues/1947 - # seems no way to get all metadata, so need to avoid this approach for weaviate - metadatas = [x.metadata for x in db.similarity_search("", k=10000)] - return metadatas - - -def get_db_lock_file(db, lock_type='getdb'): - if hasattr(db, '_persist_directory'): - persist_directory = db._persist_directory - check_persist_directory(persist_directory) - base_path = os.path.join('locks', persist_directory) - base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True) - lock_file = os.path.join(base_path, "%s.lock" % lock_type) - makedirs(os.path.dirname(lock_file)) # ensure made - return lock_file - return None - - -def get_documents(db): - if hasattr(db, '_persist_directory'): - lock_file = get_db_lock_file(db) - with filelock.FileLock(lock_file): - # get segfaults and other errors when multiple threads access this - return _get_documents(db) - else: - return _get_documents(db) - - -def _get_documents(db): - from langchain.vectorstores import FAISS - if isinstance(db, FAISS): - documents = [v for k, v in db.docstore._dict.items()] - documents = dict(documents=documents) - elif isinstance(db, Chroma) or isinstance(db, ChromaMig) or ChromaMig.__name__ in str(db): - documents = db.get() - else: - # FIXME: Hack due to https://github.com/weaviate/weaviate/issues/1947 - # seems no way to get all metadata, so need to avoid this approach for weaviate - documents = [x for x in db.similarity_search("", k=10000)] - documents = dict(documents=documents) - return documents - - -def get_docs_and_meta(db, top_k_docs, filter_kwargs={}, text_context_list=None): - if hasattr(db, '_persist_directory'): - lock_file = get_db_lock_file(db) - with filelock.FileLock(lock_file): - return _get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs, text_context_list=text_context_list) - else: - return _get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs, text_context_list=text_context_list) - - -def _get_docs_and_meta(db, top_k_docs, filter_kwargs={}, text_context_list=None): - db_documents = [] - db_metadatas = [] - - if text_context_list: - db_documents += [x.page_content if hasattr(x, 'page_content') else x for x in text_context_list] - db_metadatas += [x.metadata if hasattr(x, 'metadata') else {} for x in text_context_list] - - from langchain.vectorstores import FAISS - if isinstance(db, Chroma) or isinstance(db, ChromaMig) or ChromaMig.__name__ in str(db): - db_get = db._collection.get(where=filter_kwargs.get('filter')) - db_metadatas += db_get['metadatas'] - db_documents += db_get['documents'] - elif isinstance(db, FAISS): - import itertools - db_metadatas += get_metadatas(db) - # FIXME: FAISS has no filter - if top_k_docs == -1: - db_documents += list(db.docstore._dict.values()) - else: - # slice dict first - db_documents += list(dict(itertools.islice(db.docstore._dict.items(), top_k_docs)).values()) - elif db is not None: - db_metadatas += get_metadatas(db) - db_documents += get_documents(db)['documents'] - - return db_documents, db_metadatas - - -def get_existing_files(db): - metadatas = get_metadatas(db) - metadata_sources = set([x['source'] for x in metadatas]) - return metadata_sources - - -def get_existing_hash_ids(db): - metadatas = get_metadatas(db) - # assume consistency, that any prior hashed source was single hashed file at the time among all source chunks - metadata_hash_ids = {os.path.normpath(x['source']): x.get('hashid') for x in metadatas} - 
return metadata_hash_ids - - -def run_qa_db(**kwargs): - func_names = list(inspect.signature(_run_qa_db).parameters) - # hard-coded defaults - kwargs['answer_with_sources'] = kwargs.get('answer_with_sources', True) - kwargs['show_rank'] = kwargs.get('show_rank', False) - kwargs['show_accordions'] = kwargs.get('show_accordions', True) - kwargs['show_link_in_sources'] = kwargs.get('show_link_in_sources', True) - kwargs['top_k_docs_max_show'] = kwargs.get('top_k_docs_max_show', 10) - kwargs['llamacpp_dict'] = {} # shouldn't be required unless from test using _run_qa_db - missing_kwargs = [x for x in func_names if x not in kwargs] - assert not missing_kwargs, "Missing kwargs for run_qa_db: %s" % missing_kwargs - # only keep actual used - kwargs = {k: v for k, v in kwargs.items() if k in func_names} - try: - return _run_qa_db(**kwargs) - finally: - clear_torch_cache() - - -def _run_qa_db(query=None, - iinput=None, - context=None, - use_openai_model=False, use_openai_embedding=False, - first_para=False, text_limit=None, top_k_docs=4, chunk=True, chunk_size=512, - - # urls - use_unstructured=True, - use_playwright=False, - use_selenium=False, - - # pdfs - use_pymupdf='auto', - use_unstructured_pdf='auto', - use_pypdf='auto', - enable_pdf_ocr='auto', - enable_pdf_doctr='auto', - try_pdf_as_html='auto', - - # images - enable_ocr=False, - enable_doctr=False, - enable_pix2struct=False, - enable_captions=True, - captions_model=None, - caption_loader=None, - doctr_loader=None, - pix2struct_loader=None, - - # json - jq_schema='.[]', - - langchain_mode_paths={}, - langchain_mode_types={}, - detect_user_path_changes_every_query=False, - db_type=None, - model_name=None, model=None, tokenizer=None, inference_server=None, - langchain_only_model=False, - hf_embedding_model=None, - migrate_embedding_model=False, - auto_migrate_db=False, - stream_output=False, - async_output=True, - num_async=3, - prompter=None, - prompt_type=None, - prompt_dict=None, - answer_with_sources=True, - append_sources_to_answer=True, - cut_distance=1.64, - add_chat_history_to_context=True, - add_search_to_context=False, - keep_sources_in_context=False, - memory_restriction_level=0, - system_prompt='', - sanitize_bot_response=False, - show_rank=False, - show_accordions=True, - show_link_in_sources=True, - top_k_docs_max_show=10, - use_llm_if_no_docs=True, - load_db_if_exists=False, - db=None, - do_sample=False, - temperature=0.1, - top_k=40, - top_p=0.7, - num_beams=1, - max_new_tokens=512, - min_new_tokens=1, - early_stopping=False, - max_time=180, - repetition_penalty=1.0, - num_return_sequences=1, - langchain_mode=None, - langchain_action=None, - langchain_agents=None, - document_subset=DocumentSubset.Relevant.name, - document_choice=[DocumentChoice.ALL.value], - pre_prompt_query=None, - prompt_query=None, - pre_prompt_summary=None, - prompt_summary=None, - text_context_list=None, - chat_conversation=None, - visible_models=None, - h2ogpt_key=None, - docs_ordering_type='reverse_ucurve_sort', - min_max_new_tokens=256, - - n_jobs=-1, - llamacpp_dict=None, - verbose=False, - cli=False, - lora_weights='', - auto_reduce_chunks=True, - max_chunks=100, - total_tokens_for_docs=None, - headsize=50, - ): - """ - - :param query: - :param use_openai_model: - :param use_openai_embedding: - :param first_para: - :param text_limit: - :param top_k_docs: - :param chunk: - :param chunk_size: - :param langchain_mode_paths: dict of langchain_mode -> user path to glob recursively from - :param db_type: 'faiss' for in-memory - 'chroma' (for chroma >= 0.4) 
- 'chroma_old' (for chroma < 0.4) - 'weaviate' for persisted on disk - :param model_name: model name, used to switch behaviors - :param model: pre-initialized model, else will make new one - :param tokenizer: pre-initialized tokenizer, else will make new one. Required not None if model is not None - :param answer_with_sources - :return: - """ - t_run = time.time() - if stream_output: - # threads and asyncio don't mix - async_output = False - if langchain_action in [LangChainAction.QUERY.value]: - # only summarization supported - async_output = False - - # in case None, e.g. lazy client, then set based upon actual model - pre_prompt_query, prompt_query, pre_prompt_summary, prompt_summary = \ - get_langchain_prompts(pre_prompt_query, prompt_query, - pre_prompt_summary, prompt_summary, - model_name, inference_server, - llamacpp_dict.get('model_path_llama')) - - assert db_type is not None - assert hf_embedding_model is not None - assert langchain_mode_paths is not None - assert langchain_mode_types is not None - if model is not None: - assert model_name is not None # require so can make decisions - assert query is not None - assert prompter is not None or prompt_type is not None or model is None # if model is None, then will generate - if prompter is not None: - prompt_type = prompter.prompt_type - prompt_dict = prompter.prompt_dict - if model is not None: - assert prompt_type is not None - if prompt_type == PromptType.custom.name: - assert prompt_dict is not None # should at least be {} or '' - else: - prompt_dict = '' - - if LangChainAgent.SEARCH.value in langchain_agents and 'llama' in model_name.lower(): - system_prompt = """You are a zero shot react agent. -Consider to prompt of Question that was original query from the user. -Respond to prompt of Thought with a thought that may lead to a reasonable new action choice. -Respond to prompt of Action with an action to take out of the tools given, giving exactly single word for the tool name. -Respond to prompt of Action Input with an input to give the tool. -Consider to prompt of Observation that was response from the tool. -Repeat this Thought, Action, Action Input, Observation, Thought sequence several times with new and different thoughts and actions each time, do not repeat. -Once satisfied that the thoughts, responses are sufficient to answer the question, then respond to prompt of Thought with: I now know the final answer -Respond to prompt of Final Answer with your final high-quality bullet list answer to the original query. 
-""" - prompter.system_prompt = system_prompt - - assert len(set(gen_hyper).difference(inspect.signature(get_llm).parameters)) == 0 - # pass in context to LLM directly, since already has prompt_type structure - # can't pass through langchain in get_chain() to LLM: https://github.com/hwchase17/langchain/issues/6638 - llm, model_name, streamer, prompt_type_out, async_output, only_new_text = \ - get_llm(use_openai_model=use_openai_model, model_name=model_name, - model=model, - tokenizer=tokenizer, - inference_server=inference_server, - langchain_only_model=langchain_only_model, - stream_output=stream_output, - async_output=async_output, - num_async=num_async, - do_sample=do_sample, - temperature=temperature, - top_k=top_k, - top_p=top_p, - num_beams=num_beams, - max_new_tokens=max_new_tokens, - min_new_tokens=min_new_tokens, - early_stopping=early_stopping, - max_time=max_time, - repetition_penalty=repetition_penalty, - num_return_sequences=num_return_sequences, - prompt_type=prompt_type, - prompt_dict=prompt_dict, - prompter=prompter, - context=context, - iinput=iinput, - sanitize_bot_response=sanitize_bot_response, - system_prompt=system_prompt, - visible_models=visible_models, - h2ogpt_key=h2ogpt_key, - min_max_new_tokens=min_max_new_tokens, - n_jobs=n_jobs, - llamacpp_dict=llamacpp_dict, - cli=cli, - verbose=verbose, - ) - # in case change, override original prompter - if hasattr(llm, 'prompter'): - prompter = llm.prompter - if hasattr(llm, 'pipeline') and hasattr(llm.pipeline, 'prompter'): - prompter = llm.pipeline.prompter - - if prompter is None: - if prompt_type is None: - prompt_type = prompt_type_out - # get prompter - chat = True # FIXME? - prompter = Prompter(prompt_type, prompt_dict, debug=False, chat=chat, stream_output=stream_output, - system_prompt=system_prompt) - - use_docs_planned = False - scores = [] - chain = None - - # basic version of prompt without docs etc. 
- data_point = dict(context=context, instruction=query, input=iinput) - prompt_basic = prompter.generate_prompt(data_point) - - if isinstance(document_choice, str): - # support string as well - document_choice = [document_choice] - - func_names = list(inspect.signature(get_chain).parameters) - sim_kwargs = {k: v for k, v in locals().items() if k in func_names} - missing_kwargs = [x for x in func_names if x not in sim_kwargs] - assert not missing_kwargs, "Missing: %s" % missing_kwargs - docs, chain, scores, \ - use_docs_planned, num_docs_before_cut, \ - use_llm_if_no_docs, llm_mode, top_k_docs_max_show = \ - get_chain(**sim_kwargs) - if document_subset in non_query_commands: - formatted_doc_chunks = '\n\n'.join([get_url(x) + '\n\n' + x.page_content for x in docs]) - if not formatted_doc_chunks and not use_llm_if_no_docs: - yield dict(prompt=prompt_basic, response="No sources", sources='', num_prompt_tokens=0) - return - # if no souces, outside gpt_langchain, LLM will be used with '' input - scores = [1] * len(docs) - get_answer_args = tuple([query, docs, formatted_doc_chunks, scores, show_rank, - answer_with_sources, - append_sources_to_answer]) - get_answer_kwargs = dict(show_accordions=show_accordions, - show_link_in_sources=show_link_in_sources, - top_k_docs_max_show=top_k_docs_max_show, - docs_ordering_type=docs_ordering_type, - num_docs_before_cut=num_docs_before_cut, - verbose=verbose) - ret, extra = get_sources_answer(*get_answer_args, **get_answer_kwargs) - yield dict(prompt=prompt_basic, response=formatted_doc_chunks, sources=extra, num_prompt_tokens=0) - return - if not use_llm_if_no_docs: - if not docs and langchain_action in [LangChainAction.SUMMARIZE_MAP.value, - LangChainAction.SUMMARIZE_ALL.value, - LangChainAction.SUMMARIZE_REFINE.value]: - ret = 'No relevant documents to summarize.' if num_docs_before_cut else 'No documents to summarize.' - extra = '' - yield dict(prompt=prompt_basic, response=ret, sources=extra, num_prompt_tokens=0) - return - if not docs and not llm_mode: - ret = 'No relevant documents to query (for chatting with LLM, pick Resources->Collections->LLM).' if num_docs_before_cut else 'No documents to query (for chatting with LLM, pick Resources->Collections->LLM).' - extra = '' - yield dict(prompt=prompt_basic, response=ret, sources=extra, num_prompt_tokens=0) - return - - if chain is None and not langchain_only_model: - # here if no docs at all and not HF type - # can only return if HF type - return - - # context stuff similar to used in evaluate() - import torch - device, torch_dtype, context_class = get_device_dtype() - conditional_type = hasattr(llm, 'pipeline') and hasattr(llm.pipeline, 'model') and hasattr(llm.pipeline.model, - 'conditional_type') and llm.pipeline.model.conditional_type - with torch.no_grad(): - have_lora_weights = lora_weights not in [no_lora_str, '', None] - context_class_cast = NullContext if device == 'cpu' or have_lora_weights else torch.autocast - if conditional_type: - # issues when casting to float16, can mess up t5 model, e.g. 
only when not streaming, or other odd behaviors - context_class_cast = NullContext - with context_class_cast(device): - if stream_output and streamer: - answer = None - import queue - bucket = queue.Queue() - thread = EThread(target=chain, streamer=streamer, bucket=bucket) - thread.start() - outputs = "" - try: - for new_text in streamer: - # print("new_text: %s" % new_text, flush=True) - if bucket.qsize() > 0 or thread.exc: - thread.join() - outputs += new_text - if prompter: # and False: # FIXME: pipeline can already use prompter - if conditional_type: - if prompter.botstr: - prompt = prompter.botstr - output_with_prompt = prompt + outputs - only_new_text = False - else: - prompt = None - output_with_prompt = outputs - only_new_text = True - else: - prompt = None # FIXME - output_with_prompt = outputs - # don't specify only_new_text here, use get_llm() value - output1 = prompter.get_response(output_with_prompt, prompt=prompt, - only_new_text=only_new_text, - sanitize_bot_response=sanitize_bot_response) - yield dict(prompt=prompt, response=output1, sources='', num_prompt_tokens=0) - else: - yield dict(prompt=prompt, response=outputs, sources='', num_prompt_tokens=0) - except BaseException: - # if any exception, raise that exception if was from thread, first - if thread.exc: - raise thread.exc - raise - finally: - # in case no exception and didn't join with thread yet, then join - if not thread.exc: - answer = thread.join() - if isinstance(answer, dict): - if 'output_text' in answer: - answer = answer['output_text'] - elif 'output' in answer: - answer = answer['output'] - # in case raise StopIteration or broke queue loop in streamer, but still have exception - if thread.exc: - raise thread.exc - else: - if async_output: - import asyncio - answer = asyncio.run(chain()) - else: - answer = chain() - if isinstance(answer, dict): - if 'output_text' in answer: - answer = answer['output_text'] - elif 'output' in answer: - answer = answer['output'] - - get_answer_args = tuple([query, docs, answer, scores, show_rank, - answer_with_sources, - append_sources_to_answer]) - get_answer_kwargs = dict(show_accordions=show_accordions, - show_link_in_sources=show_link_in_sources, - top_k_docs_max_show=top_k_docs_max_show, - docs_ordering_type=docs_ordering_type, - num_docs_before_cut=num_docs_before_cut, - verbose=verbose, - t_run=t_run, - count_input_tokens=llm.count_input_tokens - if hasattr(llm, 'count_input_tokens') else None, - count_output_tokens=llm.count_output_tokens - if hasattr(llm, 'count_output_tokens') else None) - - t_run = time.time() - t_run - - # for final yield, get real prompt used - if hasattr(llm, 'prompter') and llm.prompter.prompt is not None: - prompt = llm.prompter.prompt - else: - prompt = prompt_basic - num_prompt_tokens = get_token_count(prompt, tokenizer) - - if not use_docs_planned: - ret = answer - extra = '' - yield dict(prompt=prompt, response=ret, sources=extra, num_prompt_tokens=num_prompt_tokens) - elif answer is not None: - ret, extra = get_sources_answer(*get_answer_args, **get_answer_kwargs) - yield dict(prompt=prompt, response=ret, sources=extra, num_prompt_tokens=num_prompt_tokens) - return - - -def get_docs_with_score(query, k_db, filter_kwargs, db, db_type, text_context_list=None, verbose=False): - docs_with_score = [] - got_db_docs = False - - if text_context_list: - docs_with_score += [(x, x.metadata.get('score', 1.0)) for x in text_context_list] - - # deal with bug in chroma where if (say) 234 doc chunks and ask for 233+ then fails due to reduction misbehavior 
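# --- Editor's aside (not part of the original file): the loop below works around a
# chroma failure mode where asking for nearly as many results as the collection holds
# can raise; it simply retries with a progressively smaller k. Distilled into a
# standalone helper for clarity (the helper name is hypothetical; the step sizes and
# exception types mirror the code that follows):
def similarity_search_with_backoff(db, query, k_db, filter_kwargs=None):
    filter_kwargs = filter_kwargs or {}
    while True:
        try:
            return db.similarity_search_with_score(query, k=k_db, **filter_kwargs)
        except (RuntimeError, AttributeError):
            # AttributeError can come from a mismatched langchain version
            if k_db == 1:
                raise
            if k_db > 500:
                k_db -= 200
            elif k_db > 100:
                k_db -= 50
            elif k_db > 10:
                k_db -= 5
            else:
                k_db -= 1
            k_db = max(1, k_db)
# --- end aside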
- if hasattr(db, '_embedding_function') and isinstance(db._embedding_function, FakeEmbeddings): - top_k_docs = -1 - # don't add text_context_list twice - db_documents, db_metadatas = get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs, - text_context_list=None) - # sort by order given to parser (file_id) and any chunk_id if chunked - doc_file_ids = [x.get('file_id', 0) for x in db_metadatas] - doc_chunk_ids = [x.get('chunk_id', 0) for x in db_metadatas] - docs_with_score_fake = [(Document(page_content=result[0], metadata=result[1] or {}), 1.0) - for result in zip(db_documents, db_metadatas)] - docs_with_score_fake = [x for fx, cx, x in - sorted(zip(doc_file_ids, doc_chunk_ids, docs_with_score_fake), - key=lambda x: (x[0], x[1])) - ] - got_db_docs |= len(docs_with_score_fake) > 0 - docs_with_score += docs_with_score_fake - elif db is not None and db_type in ['chroma', 'chroma_old']: - while True: - try: - docs_with_score_chroma = db.similarity_search_with_score(query, k=k_db, **filter_kwargs) - break - except (RuntimeError, AttributeError) as e: - # AttributeError is for people with wrong version of langchain - if verbose: - print("chroma bug: %s" % str(e), flush=True) - if k_db == 1: - raise - if k_db > 500: - k_db -= 200 - elif k_db > 100: - k_db -= 50 - elif k_db > 10: - k_db -= 5 - else: - k_db -= 1 - k_db = max(1, k_db) - got_db_docs |= len(docs_with_score_chroma) > 0 - docs_with_score += docs_with_score_chroma - elif db is not None: - docs_with_score_other = db.similarity_search_with_score(query, k=k_db, **filter_kwargs) - got_db_docs |= len(docs_with_score_other) > 0 - docs_with_score += docs_with_score_other - - # set in metadata original order of docs - [x[0].metadata.update(orig_index=ii) for ii, x in enumerate(docs_with_score)] - - return docs_with_score, got_db_docs - - -def get_chain(query=None, - iinput=None, - context=None, # FIXME: https://github.com/hwchase17/langchain/issues/6638 - use_openai_model=False, use_openai_embedding=False, - first_para=False, text_limit=None, top_k_docs=4, chunk=True, chunk_size=512, - - # urls - use_unstructured=True, - use_playwright=False, - use_selenium=False, - - # pdfs - use_pymupdf='auto', - use_unstructured_pdf='auto', - use_pypdf='auto', - enable_pdf_ocr='auto', - enable_pdf_doctr='auto', - try_pdf_as_html='auto', - - # images - enable_ocr=False, - enable_doctr=False, - enable_pix2struct=False, - enable_captions=True, - captions_model=None, - caption_loader=None, - doctr_loader=None, - pix2struct_loader=None, - - # json - jq_schema='.[]', - - langchain_mode_paths=None, - langchain_mode_types=None, - detect_user_path_changes_every_query=False, - db_type='faiss', - model_name=None, - inference_server='', - max_new_tokens=None, - langchain_only_model=False, - hf_embedding_model=None, - migrate_embedding_model=False, - auto_migrate_db=False, - prompter=None, - prompt_type=None, - prompt_dict=None, - system_prompt=None, - cut_distance=1.1, - add_chat_history_to_context=True, # FIXME: https://github.com/hwchase17/langchain/issues/6638 - add_search_to_context=False, - keep_sources_in_context=False, - memory_restriction_level=0, - top_k_docs_max_show=10, - - load_db_if_exists=False, - db=None, - langchain_mode=None, - langchain_action=None, - langchain_agents=None, - document_subset=DocumentSubset.Relevant.name, - document_choice=[DocumentChoice.ALL.value], - pre_prompt_query=None, - prompt_query=None, - pre_prompt_summary=None, - prompt_summary=None, - text_context_list=None, - chat_conversation=None, - - n_jobs=-1, - # beyond 
run_db_query: - llm=None, - tokenizer=None, - verbose=False, - docs_ordering_type='reverse_ucurve_sort', - min_max_new_tokens=256, - stream_output=True, - async_output=True, - - # local - auto_reduce_chunks=True, - max_chunks=100, - total_tokens_for_docs=None, - use_llm_if_no_docs=None, - headsize=50, - ): - if inference_server is None: - inference_server = '' - assert hf_embedding_model is not None - assert langchain_agents is not None # should be at least [] - if text_context_list is None: - text_context_list = [] - - # default value: - llm_mode = langchain_mode in ['Disabled', 'LLM'] and len(text_context_list) == 0 - query_action = langchain_action == LangChainAction.QUERY.value - summarize_action = langchain_action in [LangChainAction.SUMMARIZE_MAP.value, - LangChainAction.SUMMARIZE_ALL.value, - LangChainAction.SUMMARIZE_REFINE.value] - - if len(text_context_list) > 0: - # turn into documents to make easy to manage and add meta - # try to account for summarization vs. query - chunk_id = 0 if query_action else -1 - text_context_list = [ - Document(page_content=x, metadata=dict(source='text_context_list', score=1.0, chunk_id=chunk_id)) for x - in text_context_list] - - if add_search_to_context: - params = { - "engine": "duckduckgo", - "gl": "us", - "hl": "en", - } - search = H2OSerpAPIWrapper(params=params) - # if doing search, allow more docs - docs_search, top_k_docs = search.get_search_documents(query, - query_action=query_action, - chunk=chunk, chunk_size=chunk_size, - db_type=db_type, - headsize=headsize, - top_k_docs=top_k_docs) - text_context_list = docs_search + text_context_list - add_search_to_context &= len(docs_search) > 0 - top_k_docs_max_show = max(top_k_docs_max_show, len(docs_search)) - - if len(text_context_list) > 0: - llm_mode = False - use_llm_if_no_docs = True - - from src.output_parser import H2OMRKLOutputParser - from langchain.agents import AgentType, load_tools, initialize_agent, create_vectorstore_agent, \ - create_pandas_dataframe_agent, create_json_agent, create_csv_agent - from langchain.agents.agent_toolkits import VectorStoreInfo, VectorStoreToolkit, create_python_agent, JsonToolkit - if LangChainAgent.SEARCH.value in langchain_agents: - output_parser = H2OMRKLOutputParser() - tools = load_tools(["serpapi"], llm=llm, serpapi_api_key=os.environ.get('SERPAPI_API_KEY')) - if inference_server.startswith('openai'): - agent_type = AgentType.OPENAI_FUNCTIONS - agent_executor_kwargs = {"handle_parsing_errors": True, 'output_parser': output_parser} - else: - agent_type = AgentType.ZERO_SHOT_REACT_DESCRIPTION - agent_executor_kwargs = {'output_parser': output_parser} - chain = initialize_agent(tools, llm, agent=agent_type, - agent_executor_kwargs=agent_executor_kwargs, - agent_kwargs=dict(output_parser=output_parser, - format_instructions=output_parser.get_format_instructions()), - output_parser=output_parser, - max_iterations=10, - verbose=True) - chain_kwargs = dict(input=query) - target = wrapped_partial(chain, chain_kwargs) - - docs = [] - scores = [] - use_docs_planned = False - num_docs_before_cut = 0 - use_llm_if_no_docs = True - return docs, target, scores, use_docs_planned, num_docs_before_cut, use_llm_if_no_docs, llm_mode, top_k_docs_max_show - - if LangChainAgent.COLLECTION.value in langchain_agents: - output_parser = H2OMRKLOutputParser() - vectorstore_info = VectorStoreInfo( - name=langchain_mode, - description="DataBase of text from PDFs, Image Captions, or web URL content", - vectorstore=db, - ) - toolkit = 
VectorStoreToolkit(vectorstore_info=vectorstore_info) - chain = create_vectorstore_agent(llm=llm, toolkit=toolkit, - agent_executor_kwargs=dict(output_parser=output_parser), - verbose=True) - - chain_kwargs = dict(input=query) - target = wrapped_partial(chain, chain_kwargs) - - docs = [] - scores = [] - use_docs_planned = False - num_docs_before_cut = 0 - use_llm_if_no_docs = True - return docs, target, scores, use_docs_planned, num_docs_before_cut, use_llm_if_no_docs, llm_mode, top_k_docs_max_show - - if LangChainAgent.PYTHON.value in langchain_agents and inference_server.startswith('openai'): - chain = create_python_agent( - llm=llm, - tool=PythonREPLTool(), - verbose=True, - agent_type=AgentType.OPENAI_FUNCTIONS, - agent_executor_kwargs={"handle_parsing_errors": True}, - ) - - chain_kwargs = dict(input=query) - target = wrapped_partial(chain, chain_kwargs) - - docs = [] - scores = [] - use_docs_planned = False - num_docs_before_cut = 0 - use_llm_if_no_docs = True - return docs, target, scores, use_docs_planned, num_docs_before_cut, use_llm_if_no_docs, llm_mode, top_k_docs_max_show - - if LangChainAgent.PANDAS.value in langchain_agents and inference_server.startswith('openai_chat'): - # FIXME: DATA - df = pd.DataFrame(None) - chain = create_pandas_dataframe_agent( - llm, - df, - verbose=True, - agent_type=AgentType.OPENAI_FUNCTIONS, - ) - - chain_kwargs = dict(input=query) - target = wrapped_partial(chain, chain_kwargs) - - docs = [] - scores = [] - use_docs_planned = False - num_docs_before_cut = 0 - use_llm_if_no_docs = True - return docs, target, scores, use_docs_planned, num_docs_before_cut, use_llm_if_no_docs, llm_mode, top_k_docs_max_show - - if isinstance(document_choice, str): - document_choice = [document_choice] - if document_choice and document_choice[0] == DocumentChoice.ALL.value: - document_choice_agent = document_choice[1:] - else: - document_choice_agent = document_choice - document_choice_agent = [x for x in document_choice_agent if x.endswith('.json')] - if LangChainAgent.JSON.value in \ - langchain_agents and \ - inference_server.startswith('openai_chat') and \ - len(document_choice_agent) == 1 and \ - document_choice_agent[0].endswith('.json'): - # with open('src/openai.yaml') as f: - # data = yaml.load(f, Loader=yaml.FullLoader) - with open(document_choice[0], 'rt') as f: - data = json.loads(f.read()) - json_spec = JsonSpec(dict_=data, max_value_length=4000) - json_toolkit = JsonToolkit(spec=json_spec) - - chain = create_json_agent( - llm=llm, toolkit=json_toolkit, verbose=True - ) - - chain_kwargs = dict(input=query) - target = wrapped_partial(chain, chain_kwargs) - - docs = [] - scores = [] - use_docs_planned = False - num_docs_before_cut = 0 - use_llm_if_no_docs = True - return docs, target, scores, use_docs_planned, num_docs_before_cut, use_llm_if_no_docs, llm_mode, top_k_docs_max_show - - if isinstance(document_choice, str): - document_choice = [document_choice] - if document_choice and document_choice[0] == DocumentChoice.ALL.value: - document_choice_agent = document_choice[1:] - else: - document_choice_agent = document_choice - document_choice_agent = [x for x in document_choice_agent if x.endswith('.csv')] - if LangChainAgent.CSV.value in langchain_agents and len(document_choice_agent) == 1 and document_choice_agent[ - 0].endswith( - '.csv'): - data_file = document_choice[0] - if inference_server.startswith('openai_chat'): - chain = create_csv_agent( - llm, - data_file, - verbose=True, - agent_type=AgentType.OPENAI_FUNCTIONS, - ) - else: - chain = 
create_csv_agent( - llm, - data_file, - verbose=True, - agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION, - ) - chain_kwargs = dict(input=query) - target = wrapped_partial(chain, chain_kwargs) - - docs = [] - scores = [] - use_docs_planned = False - num_docs_before_cut = 0 - use_llm_if_no_docs = True - return docs, target, scores, use_docs_planned, num_docs_before_cut, use_llm_if_no_docs, llm_mode, top_k_docs_max_show - - # determine whether use of context out of docs is planned - if not use_openai_model and prompt_type not in ['plain'] or langchain_only_model: - if llm_mode: - use_docs_planned = False - else: - use_docs_planned = True - else: - use_docs_planned = True - - # https://github.com/hwchase17/langchain/issues/1946 - # FIXME: Seems to way to get size of chroma db to limit top_k_docs to avoid - # Chroma collection MyData contains fewer than 4 elements. - # type logger error - if top_k_docs == -1: - k_db = 1000 if db_type in ['chroma', 'chroma_old'] else 100 - else: - # top_k_docs=100 works ok too - k_db = 1000 if db_type in ['chroma', 'chroma_old'] else top_k_docs - - # FIXME: For All just go over all dbs instead of a separate db for All - if not detect_user_path_changes_every_query and db is not None: - # avoid looking at user_path during similarity search db handling, - # if already have db and not updating from user_path every query - # but if db is None, no db yet loaded (e.g. from prep), so allow user_path to be whatever it was - if langchain_mode_paths is None: - langchain_mode_paths = {} - langchain_mode_paths = langchain_mode_paths.copy() - langchain_mode_paths[langchain_mode] = None - # once use_openai_embedding, hf_embedding_model passed in, possibly changed, - # but that's ok as not used below or in calling functions - db, num_new_sources, new_sources_metadata = make_db(use_openai_embedding=use_openai_embedding, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - first_para=first_para, text_limit=text_limit, - chunk=chunk, chunk_size=chunk_size, - - # urls - use_unstructured=use_unstructured, - use_playwright=use_playwright, - use_selenium=use_selenium, - - # pdfs - use_pymupdf=use_pymupdf, - use_unstructured_pdf=use_unstructured_pdf, - use_pypdf=use_pypdf, - enable_pdf_ocr=enable_pdf_ocr, - enable_pdf_doctr=enable_pdf_doctr, - try_pdf_as_html=try_pdf_as_html, - - # images - enable_ocr=enable_ocr, - enable_doctr=enable_doctr, - enable_pix2struct=enable_pix2struct, - enable_captions=enable_captions, - captions_model=captions_model, - caption_loader=caption_loader, - doctr_loader=doctr_loader, - pix2struct_loader=pix2struct_loader, - - # json - jq_schema=jq_schema, - - langchain_mode=langchain_mode, - langchain_mode_paths=langchain_mode_paths, - langchain_mode_types=langchain_mode_types, - db_type=db_type, - load_db_if_exists=load_db_if_exists, - db=db, - n_jobs=n_jobs, - verbose=verbose) - num_docs_before_cut = 0 - use_template = not use_openai_model and prompt_type not in ['plain'] or langchain_only_model - got_db_docs = False # not yet at least - template, template_if_no_docs, auto_reduce_chunks, query = \ - get_template(query, iinput, - pre_prompt_query, prompt_query, - pre_prompt_summary, prompt_summary, - langchain_action, - llm_mode, - use_docs_planned, - auto_reduce_chunks, - got_db_docs, - add_search_to_context) - - max_input_tokens = get_max_input_tokens(llm=llm, tokenizer=tokenizer, inference_server=inference_server, - model_name=model_name, max_new_tokens=max_new_tokens) - - if (db or 
text_context_list) and use_docs_planned: - if hasattr(db, '_persist_directory'): - lock_file = get_db_lock_file(db, lock_type='sim') - else: - base_path = 'locks' - base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True) - name_path = "sim.lock" - lock_file = os.path.join(base_path, name_path) - - if not (isinstance(db, Chroma) or isinstance(db, ChromaMig) or ChromaMig.__name__ in str(db)): - # only chroma supports filtering - filter_kwargs = {} - filter_kwargs_backup = {} - else: - import logging - logging.getLogger("chromadb").setLevel(logging.ERROR) - assert document_choice is not None, "Document choice was None" - if isinstance(db, Chroma): - filter_kwargs_backup = {} # shouldn't ever need backup - # chroma >= 0.4 - if len(document_choice) == 0 or len(document_choice) >= 1 and document_choice[ - 0] == DocumentChoice.ALL.value: - filter_kwargs = {"filter": {"chunk_id": {"$gte": 0}}} if query_action else \ - {"filter": {"chunk_id": {"$eq": -1}}} - else: - if document_choice[0] == DocumentChoice.ALL.value: - document_choice = document_choice[1:] - if len(document_choice) == 0: - filter_kwargs = {} - elif len(document_choice) > 1: - or_filter = [ - {"$and": [dict(source={"$eq": x}), dict(chunk_id={"$gte": 0})]} if query_action else { - "$and": [dict(source={"$eq": x}), dict(chunk_id={"$eq": -1})]} - for x in document_choice] - filter_kwargs = dict(filter={"$or": or_filter}) - else: - # still chromadb UX bug, have to do different thing for 1 vs. 2+ docs when doing filter - one_filter = \ - [{"source": {"$eq": x}, "chunk_id": {"$gte": 0}} if query_action else { - "source": {"$eq": x}, - "chunk_id": { - "$eq": -1}} - for x in document_choice][0] - - filter_kwargs = dict(filter={"$and": [dict(source=one_filter['source']), - dict(chunk_id=one_filter['chunk_id'])]}) - else: - # migration for chroma < 0.4 - if len(document_choice) == 0 or len(document_choice) >= 1 and document_choice[ - 0] == DocumentChoice.ALL.value: - filter_kwargs = {"filter": {"chunk_id": {"$gte": 0}}} if query_action else \ - {"filter": {"chunk_id": {"$eq": -1}}} - filter_kwargs_backup = {"filter": {"chunk_id": {"$gte": 0}}} - elif len(document_choice) >= 2: - if document_choice[0] == DocumentChoice.ALL.value: - document_choice = document_choice[1:] - or_filter = [ - {"source": {"$eq": x}, "chunk_id": {"$gte": 0}} if query_action else {"source": {"$eq": x}, - "chunk_id": { - "$eq": -1}} - for x in document_choice] - filter_kwargs = dict(filter={"$or": or_filter}) - or_filter_backup = [ - {"source": {"$eq": x}} if query_action else {"source": {"$eq": x}} - for x in document_choice] - filter_kwargs_backup = dict(filter={"$or": or_filter_backup}) - elif len(document_choice) == 1: - # degenerate UX bug in chroma - one_filter = \ - [{"source": {"$eq": x}, "chunk_id": {"$gte": 0}} if query_action else {"source": {"$eq": x}, - "chunk_id": { - "$eq": -1}} - for x in document_choice][0] - filter_kwargs = dict(filter=one_filter) - one_filter_backup = \ - [{"source": {"$eq": x}} if query_action else {"source": {"$eq": x}} - for x in document_choice][0] - filter_kwargs_backup = dict(filter=one_filter_backup) - else: - # shouldn't reach - filter_kwargs = {} - filter_kwargs_backup = {} - - if llm_mode: - docs = [] - scores = [] - elif document_subset == DocumentSubset.TopKSources.name or query in [None, '', '\n']: - db_documents, db_metadatas = get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs, - text_context_list=text_context_list) - if len(db_documents) == 0 and filter_kwargs_backup: - db_documents, 
db_metadatas = get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs_backup, - text_context_list=text_context_list) - - if top_k_docs == -1: - top_k_docs = len(db_documents) - # similar to langchain's chroma's _results_to_docs_and_scores - docs_with_score = [(Document(page_content=result[0], metadata=result[1] or {}), 0) - for result in zip(db_documents, db_metadatas)] - # set in metadata original order of docs - [x[0].metadata.update(orig_index=ii) for ii, x in enumerate(docs_with_score)] - - # order documents - doc_hashes = [x.get('doc_hash', 'None') for x in db_metadatas] - if query_action: - doc_chunk_ids = [x.get('chunk_id', 0) for x in db_metadatas] - docs_with_score2 = [x for hx, cx, x in - sorted(zip(doc_hashes, doc_chunk_ids, docs_with_score), key=lambda x: (x[0], x[1])) - if cx >= 0] - else: - assert summarize_action - doc_chunk_ids = [x.get('chunk_id', -1) for x in db_metadatas] - docs_with_score2 = [x for hx, cx, x in - sorted(zip(doc_hashes, doc_chunk_ids, docs_with_score), key=lambda x: (x[0], x[1])) - if cx == -1 - ] - if len(docs_with_score2) == 0 and len(docs_with_score) > 0: - # old database without chunk_id, migration added 0 but didn't make -1 as that would be expensive - # just do again and relax filter, let summarize operate on actual chunks if nothing else - docs_with_score2 = [x for hx, cx, x in - sorted(zip(doc_hashes, doc_chunk_ids, docs_with_score), - key=lambda x: (x[0], x[1])) - ] - docs_with_score = docs_with_score2 - - docs_with_score = docs_with_score[:top_k_docs] - docs = [x[0] for x in docs_with_score] - scores = [x[1] for x in docs_with_score] - num_docs_before_cut = len(docs) - else: - with filelock.FileLock(lock_file): - docs_with_score, got_db_docs = get_docs_with_score(query, k_db, filter_kwargs, db, db_type, - text_context_list=text_context_list, - verbose=verbose) - if len(docs_with_score) == 0 and filter_kwargs_backup: - docs_with_score, got_db_docs = get_docs_with_score(query, k_db, filter_kwargs_backup, db, - db_type, - text_context_list=text_context_list, - verbose=verbose) - - tokenizer = get_tokenizer(db=db, llm=llm, tokenizer=tokenizer, inference_server=inference_server, - use_openai_model=use_openai_model, - db_type=db_type) - # NOTE: if map_reduce, then no need to auto reduce chunks - if query_action and (top_k_docs == -1 or auto_reduce_chunks): - top_k_docs_tokenize = 100 - docs_with_score = docs_with_score[:top_k_docs_tokenize] - - prompt_no_docs = template.format(context='', question=query) - - model_max_length = tokenizer.model_max_length - chat = True # FIXME? 
- - # first docs_with_score are most important with highest score - full_prompt, \ - instruction, iinput, context, \ - num_prompt_tokens, max_new_tokens, \ - num_prompt_tokens0, num_prompt_tokens_actual, \ - chat_index, top_k_docs_trial, one_doc_size = \ - get_limited_prompt(prompt_no_docs, - iinput, - tokenizer, - prompter=prompter, - inference_server=inference_server, - prompt_type=prompt_type, - prompt_dict=prompt_dict, - chat=chat, - max_new_tokens=max_new_tokens, - system_prompt=system_prompt, - context=context, - chat_conversation=chat_conversation, - text_context_list=[x[0].page_content for x in docs_with_score], - keep_sources_in_context=keep_sources_in_context, - model_max_length=model_max_length, - memory_restriction_level=memory_restriction_level, - langchain_mode=langchain_mode, - add_chat_history_to_context=add_chat_history_to_context, - min_max_new_tokens=min_max_new_tokens, - ) - # avoid craziness - if 0 < top_k_docs_trial < max_chunks: - # avoid craziness - if top_k_docs == -1: - top_k_docs = top_k_docs_trial - else: - top_k_docs = min(top_k_docs, top_k_docs_trial) - elif top_k_docs_trial >= max_chunks: - top_k_docs = max_chunks - if top_k_docs > 0: - docs_with_score = docs_with_score[:top_k_docs] - elif one_doc_size is not None: - docs_with_score = [docs_with_score[0][:one_doc_size]] - else: - docs_with_score = [] - else: - if total_tokens_for_docs is not None: - # used to limit tokens for summarization, e.g. public instance - top_k_docs, one_doc_size, num_doc_tokens = \ - get_docs_tokens(tokenizer, - text_context_list=[x[0].page_content for x in docs_with_score], - max_input_tokens=total_tokens_for_docs) - - docs_with_score = docs_with_score[:top_k_docs] - - # put most relevant chunks closest to question, - # esp. if truncation occurs will be "oldest" or "farthest from response" text that is truncated - # BUT: for small models, e.g. 
6_9 pythia, if sees some stuff related to h2oGPT first, it can connect that and not listen to rest - if docs_ordering_type in ['best_first']: - pass - elif docs_ordering_type in ['best_near_prompt', 'reverse_sort']: - docs_with_score.reverse() - elif docs_ordering_type in ['', None, 'reverse_ucurve_sort']: - docs_with_score = reverse_ucurve_list(docs_with_score) - else: - raise ValueError("No such docs_ordering_type=%s" % docs_ordering_type) - - # cut off so no high distance docs/sources considered - num_docs_before_cut = len(docs_with_score) - docs = [x[0] for x in docs_with_score if x[1] < cut_distance] - scores = [x[1] for x in docs_with_score if x[1] < cut_distance] - if len(scores) > 0 and verbose: - print("Distance: min: %s max: %s mean: %s median: %s" % - (scores[0], scores[-1], np.mean(scores), np.median(scores)), flush=True) - else: - docs = [] - scores = [] - - if not docs and use_docs_planned and not langchain_only_model: - # if HF type and have no docs, can bail out - return docs, None, [], False, num_docs_before_cut, use_llm_if_no_docs, llm_mode, top_k_docs_max_show - - if document_subset in non_query_commands: - # no LLM use - return docs, None, [], False, num_docs_before_cut, use_llm_if_no_docs, llm_mode, top_k_docs_max_show - - # FIXME: WIP - common_words_file = "data/NGSL_1.2_stats.csv.zip" - if False and os.path.isfile(common_words_file) and langchain_action == LangChainAction.QUERY.value: - df = pd.read_csv("data/NGSL_1.2_stats.csv.zip") - import string - reduced_query = query.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation))).strip() - reduced_query_words = reduced_query.split(' ') - set_common = set(df['Lemma'].values.tolist()) - num_common = len([x.lower() in set_common for x in reduced_query_words]) - frac_common = num_common / len(reduced_query) if reduced_query else 0 - # FIXME: report to user bad query that uses too many common words - if verbose: - print("frac_common: %s" % frac_common, flush=True) - - if len(docs) == 0: - # avoid context == in prompt then - use_docs_planned = False - template = template_if_no_docs - - got_db_docs = got_db_docs and len(text_context_list) < len(docs) - # update template in case situation changed or did get docs - # then no new documents from database or not used, redo template - # got template earlier as estimate of template token size, here is final used version - template, template_if_no_docs, auto_reduce_chunks, query = \ - get_template(query, iinput, - pre_prompt_query, prompt_query, - pre_prompt_summary, prompt_summary, - langchain_action, - llm_mode, - use_docs_planned, - auto_reduce_chunks, - got_db_docs, - add_search_to_context) - - if langchain_action == LangChainAction.QUERY.value: - if use_template: - # instruct-like, rather than few-shot prompt_type='plain' as default - # but then sources confuse the model with how inserted among rest of text, so avoid - prompt = PromptTemplate( - # input_variables=["summaries", "question"], - input_variables=["context", "question"], - template=template, - ) - chain = load_qa_chain(llm, prompt=prompt, verbose=verbose) - else: - # only if use_openai_model = True, unused normally except in testing - chain = load_qa_with_sources_chain(llm) - if not use_docs_planned: - chain_kwargs = dict(input_documents=[], question=query) - else: - chain_kwargs = dict(input_documents=docs, question=query) - target = wrapped_partial(chain, chain_kwargs) - elif langchain_action in [LangChainAction.SUMMARIZE_MAP.value, - LangChainAction.SUMMARIZE_REFINE, - 
LangChainAction.SUMMARIZE_ALL.value]: - if async_output: - return_intermediate_steps = False - else: - return_intermediate_steps = True - from langchain.chains.summarize import load_summarize_chain - if langchain_action == LangChainAction.SUMMARIZE_MAP.value: - prompt = PromptTemplate(input_variables=["text"], template=template) - chain = load_summarize_chain(llm, chain_type="map_reduce", - map_prompt=prompt, combine_prompt=prompt, - return_intermediate_steps=return_intermediate_steps, - token_max=max_input_tokens, verbose=verbose) - if async_output: - chain_func = chain.arun - else: - chain_func = chain - target = wrapped_partial(chain_func, {"input_documents": docs}) # , return_only_outputs=True) - elif langchain_action == LangChainAction.SUMMARIZE_ALL.value: - assert use_template - prompt = PromptTemplate(input_variables=["text"], template=template) - chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt, - return_intermediate_steps=return_intermediate_steps, verbose=verbose) - if async_output: - chain_func = chain.arun - else: - chain_func = chain - target = wrapped_partial(chain_func) - elif langchain_action == LangChainAction.SUMMARIZE_REFINE.value: - chain = load_summarize_chain(llm, chain_type="refine", - return_intermediate_steps=return_intermediate_steps, verbose=verbose) - if async_output: - chain_func = chain.arun - else: - chain_func = chain - target = wrapped_partial(chain_func) - else: - raise RuntimeError("No such langchain_action=%s" % langchain_action) - else: - raise RuntimeError("No such langchain_action=%s" % langchain_action) - - return docs, target, scores, use_docs_planned, num_docs_before_cut, use_llm_if_no_docs, llm_mode, top_k_docs_max_show - - -def get_max_model_length(llm=None, tokenizer=None, inference_server=None, model_name=None): - if hasattr(tokenizer, 'model_max_length'): - return tokenizer.model_max_length - elif inference_server in ['openai', 'openai_azure']: - return llm.modelname_to_contextsize(model_name) - elif inference_server in ['openai_chat', 'openai_azure_chat']: - return model_token_mapping[model_name] - elif isinstance(tokenizer, FakeTokenizer): - # GGML - return tokenizer.model_max_length - else: - return 2048 - - -def get_max_input_tokens(llm=None, tokenizer=None, inference_server=None, model_name=None, max_new_tokens=None): - model_max_length = get_max_model_length(llm=llm, tokenizer=tokenizer, inference_server=inference_server, - model_name=model_name) - - if any([inference_server.startswith(x) for x in - ['openai', 'openai_azure', 'openai_chat', 'openai_azure_chat', 'vllm']]): - # openai can't handle tokens + max_new_tokens > max_tokens even if never generate those tokens - # and vllm uses OpenAI API with same limits - max_input_tokens = model_max_length - max_new_tokens - elif isinstance(tokenizer, FakeTokenizer): - # don't trust that fake tokenizer (e.g. 
GGML) will make lots of tokens normally, allow more input - max_input_tokens = model_max_length - min(256, max_new_tokens) - else: - if 'falcon' in model_name or inference_server.startswith('http'): - # allow for more input for falcon, assume won't make as long outputs as default max_new_tokens - # Also allow if TGI or Gradio, because we tell it input may be same as output, even if model can't actually handle - max_input_tokens = model_max_length - min(256, max_new_tokens) - else: - # trust that maybe model will make so many tokens, so limit input - max_input_tokens = model_max_length - max_new_tokens - - return max_input_tokens - - -def get_tokenizer(db=None, llm=None, tokenizer=None, inference_server=None, use_openai_model=False, - db_type='chroma'): - if hasattr(llm, 'pipeline') and hasattr(llm.pipeline, 'tokenizer'): - # more accurate - return llm.pipeline.tokenizer - elif hasattr(llm, 'tokenizer'): - # e.g. TGI client mode etc. - return llm.tokenizer - elif inference_server in ['openai', 'openai_chat', 'openai_azure', - 'openai_azure_chat']: - return tokenizer - elif isinstance(tokenizer, FakeTokenizer): - return tokenizer - elif use_openai_model: - return FakeTokenizer() - elif (hasattr(db, '_embedding_function') and - hasattr(db._embedding_function, 'client') and - hasattr(db._embedding_function.client, 'tokenize')): - # in case model is not our pipeline with HF tokenizer - return db._embedding_function.client.tokenize - else: - # backup method - if os.getenv('HARD_ASSERTS'): - assert db_type in ['faiss', 'weaviate'] - # use tiktoken for faiss since embedding called differently - return FakeTokenizer() - - -def get_template(query, iinput, - pre_prompt_query, prompt_query, - pre_prompt_summary, prompt_summary, - langchain_action, - llm_mode, - use_docs_planned, - auto_reduce_chunks, - got_db_docs, - add_search_to_context): - if got_db_docs and add_search_to_context: - # modify prompts, assumes patterns like in predefined prompts. If user customizes, then they'd need to account for that. - prompt_query = prompt_query.replace('information in the document sources', - 'information in the document and web search sources (and their source dates and website source)') - prompt_summary = prompt_summary.replace('information in the document sources', - 'information in the document and web search sources (and their source dates and website source)') - elif got_db_docs and not add_search_to_context: - pass - elif not got_db_docs and add_search_to_context: - # modify prompts, assumes patterns like in predefined prompts. If user customizes, then they'd need to account for that. 
- prompt_query = prompt_query.replace('information in the document sources', - 'information in the web search sources (and their source dates and website source)') - prompt_summary = prompt_summary.replace('information in the document sources', - 'information in the web search sources (and their source dates and website source)') - - if langchain_action == LangChainAction.QUERY.value: - if iinput: - query = "%s\n%s" % (query, iinput) - if llm_mode or not use_docs_planned: - template_if_no_docs = template = """{context}{question}""" - else: - template = """%s -\"\"\" -{context} -\"\"\" -%s{question}""" % (pre_prompt_query, prompt_query) - template_if_no_docs = """{context}{question}""" - elif langchain_action in [LangChainAction.SUMMARIZE_ALL.value, LangChainAction.SUMMARIZE_MAP.value]: - none = ['', '\n', None] - - # modify prompt_summary if user passes query or iinput - if query not in none and iinput not in none: - prompt_summary = "Focusing on %s, %s, %s" % (query, iinput, prompt_summary) - elif query not in none: - prompt_summary = "Focusing on %s, %s" % (query, prompt_summary) - # don't auto reduce - auto_reduce_chunks = False - if langchain_action == LangChainAction.SUMMARIZE_MAP.value: - fstring = '{text}' - else: - fstring = '{input_documents}' - template = """%s: -\"\"\" -%s -\"\"\"\n%s""" % (pre_prompt_summary, fstring, prompt_summary) - template_if_no_docs = "Exactly only say: There are no documents to summarize." - elif langchain_action in [LangChainAction.SUMMARIZE_REFINE]: - template = '' # unused - template_if_no_docs = '' # unused - else: - raise RuntimeError("No such langchain_action=%s" % langchain_action) - - return template, template_if_no_docs, auto_reduce_chunks, query - - -def get_sources_answer(query, docs, answer, scores, show_rank, - answer_with_sources, append_sources_to_answer, - show_accordions=True, - show_link_in_sources=True, - top_k_docs_max_show=10, - docs_ordering_type='reverse_ucurve_sort', - num_docs_before_cut=0, - verbose=False, - t_run=None, - count_input_tokens=None, count_output_tokens=None): - if verbose: - print("query: %s" % query, flush=True) - print("answer: %s" % answer, flush=True) - - if len(docs) == 0: - extra = '' - ret = answer + extra - return ret, extra - - if answer_with_sources == -1: - extra = [dict(score=score, content=get_doc(x), source=get_source(x), orig_index=x.metadata.get('orig_index', 0)) - for score, x in zip(scores, docs)][ - :top_k_docs_max_show] - if append_sources_to_answer: - extra_str = [str(x) for x in extra] - ret = answer + '\n\n' + '\n'.join(extra_str) - else: - ret = answer - return ret, extra - - # link - answer_sources = [(max(0.0, 1.5 - score) / 1.5, - get_url(doc, font_size=font_size), - get_accordion(doc, font_size=font_size, head_acc=head_acc)) for score, doc in - zip(scores, docs)] - if not show_accordions: - answer_sources_dict = defaultdict(list) - [answer_sources_dict[url].append(score) for score, url in answer_sources] - answers_dict = {} - for url, scores_url in answer_sources_dict.items(): - answers_dict[url] = np.max(scores_url) - answer_sources = [(score, url) for url, score in answers_dict.items()] - answer_sources.sort(key=lambda x: x[0], reverse=True) - if show_rank: - # answer_sources = ['%d | %s' % (1 + rank, url) for rank, (score, url) in enumerate(answer_sources)] - # sorted_sources_urls = "Sources [Rank | Link]:
      " + "
      ".join(answer_sources) - answer_sources = ['%s' % url for rank, (score, url) in enumerate(answer_sources)] - answer_sources = answer_sources[:top_k_docs_max_show] - sorted_sources_urls = "Ranked Sources:
      " + "
      ".join(answer_sources) - else: - if show_accordions: - if show_link_in_sources: - answer_sources = ['
<font size="%s"><li>%.2g | %s</li>%s</font>' % (font_size, score, url, accordion)
-                                  for score, url, accordion in answer_sources]
-            else:
-                answer_sources = ['<font size="%s"><li>%.2g</li>%s</font>' % (font_size, score, accordion)
-                                  for score, url, accordion in answer_sources]
-        else:
-            if show_link_in_sources:
-                answer_sources = ['<font size="%s"><li>%.2g | %s</li></font>' % (font_size, score, url)
-                                  for score, url in answer_sources]
-            else:
-                answer_sources = ['<font size="%s"><li>%.2g</li></font>' % (font_size, score)
-                                  for score, url in answer_sources]
-        answer_sources = answer_sources[:top_k_docs_max_show]
-        if show_accordions:
-            sorted_sources_urls = f"{source_prefix}<p><ul>" + "".join(answer_sources)
-        else:
-            sorted_sources_urls = f"{source_prefix}<p><ul>" + "<p>".join(
-                answer_sources)
-        if verbose:
-            if int(t_run):
-                sorted_sources_urls += 'Total Time: %d [s]<p>' % t_run
-            if count_input_tokens and count_output_tokens:
-                sorted_sources_urls += 'Input Tokens: %s | Output Tokens: %d<p>' % (
-                    count_input_tokens, count_output_tokens)
-        sorted_sources_urls += f"</ul></p>{source_postfix}"
-        title_overall = "Sources"
-        sorted_sources_urls = f"""<details><summary><h3>{title_overall}</h3></summary>{sorted_sources_urls}</details>
        """ - if os.getenv("HARD_ASSERTS"): - assert sorted_sources_urls.startswith(super_source_prefix) - assert sorted_sources_urls.endswith(super_source_postfix) - - if not answer.endswith('\n'): - answer += '\n' - - if answer_with_sources: - extra = '\n' + sorted_sources_urls - else: - extra = '' - if append_sources_to_answer: - ret = answer + extra - else: - ret = answer - return ret, extra - - -def get_any_db(db1s, langchain_mode, langchain_mode_paths, langchain_mode_types, - dbs=None, - load_db_if_exists=None, db_type=None, - use_openai_embedding=None, - hf_embedding_model=None, migrate_embedding_model=None, auto_migrate_db=None, - for_sources_list=False, - verbose=False, - n_jobs=-1, - ): - if langchain_mode in [LangChainMode.DISABLED.value, LangChainMode.LLM.value]: - return None - elif for_sources_list and langchain_mode in [LangChainMode.WIKI_FULL.value]: - # NOTE: avoid showing full wiki. Takes about 30 seconds over about 90k entries, but not useful for now - return None - elif langchain_mode in db1s and len(db1s[langchain_mode]) > 1 and db1s[langchain_mode][0]: - return db1s[langchain_mode][0] - elif dbs is not None and langchain_mode in dbs and dbs[langchain_mode] is not None: - return dbs[langchain_mode] - else: - db = None - - if db is None: - langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value) - persist_directory, langchain_type = get_persist_directory(langchain_mode, db1s=db1s, dbs=dbs, - langchain_type=langchain_type) - langchain_mode_types[langchain_mode] = langchain_type - # see if actually have on disk, don't try to switch embedding yet, since can't use return here - migrate_embedding_model = False - db, _, _ = \ - get_existing_db(db, persist_directory, load_db_if_exists, db_type, - use_openai_embedding, - langchain_mode, langchain_mode_paths, langchain_mode_types, - hf_embedding_model, migrate_embedding_model, auto_migrate_db, - verbose=verbose, n_jobs=n_jobs) - if db is not None: - # if found db, then stuff into state, so don't have to reload again that takes time - if langchain_type == LangChainTypes.PERSONAL.value: - assert isinstance(db1s, dict), "db1s wrong type: %s" % type(db1s) - db1 = db1s[langchain_mode] = [db, None, None] - assert len(db1) == length_db1(), "Bad setup: %s" % len(db1) - set_dbid(db1) - else: - assert isinstance(dbs, dict), "dbs wrong type: %s" % type(dbs) - dbs[langchain_mode] = db - - return db - - -def get_sources(db1s, selection_docs_state1, requests_state1, langchain_mode, - dbs=None, docs_state0=None, - load_db_if_exists=None, - db_type=None, - use_openai_embedding=None, - hf_embedding_model=None, - migrate_embedding_model=None, - auto_migrate_db=None, - verbose=False, - get_userid_auth=None, - n_jobs=-1, - ): - for k in db1s: - set_dbid(db1s[k]) - langchain_mode_paths = selection_docs_state1['langchain_mode_paths'] - langchain_mode_types = selection_docs_state1['langchain_mode_types'] - set_userid(db1s, requests_state1, get_userid_auth) - db = get_any_db(db1s, langchain_mode, langchain_mode_paths, langchain_mode_types, - dbs=dbs, - load_db_if_exists=load_db_if_exists, - db_type=db_type, - use_openai_embedding=use_openai_embedding, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - for_sources_list=True, - verbose=verbose, - n_jobs=n_jobs, - ) - - if langchain_mode in ['LLM'] or db is None: - source_files_added = "NA" - source_list = [] - num_chunks = 0 - elif langchain_mode in ['wiki_full']: - source_files_added = "Not 
showing wiki_full, takes about 20 seconds and makes 4MB file." \ - " Ask jon.mckinney@h2o.ai for file if required." - source_list = [] - num_chunks = 0 - elif db is not None: - metadatas = get_metadatas(db) - source_list = sorted(set([x['source'] for x in metadatas])) - source_files_added = '\n'.join(source_list) - num_chunks = len(metadatas) - else: - source_list = [] - source_files_added = "None" - num_chunks = 0 - sources_dir = "sources_dir" - sources_dir = makedirs(sources_dir, exist_ok=True, tmp_ok=True, use_base=True) - sources_file = os.path.join(sources_dir, 'sources_%s_%s' % (langchain_mode, str(uuid.uuid4()))) - with open(sources_file, "wt") as f: - f.write(source_files_added) - source_list = docs_state0 + source_list - if DocumentChoice.ALL.value in source_list: - source_list.remove(DocumentChoice.ALL.value) - return sources_file, source_list, num_chunks, db - - -def update_user_db(file, db1s, selection_docs_state1, requests_state1, - langchain_mode=None, - get_userid_auth=None, - **kwargs): - kwargs.update(selection_docs_state1) - set_userid(db1s, requests_state1, get_userid_auth) - - if file is None: - raise RuntimeError("Don't use change, use input") - - try: - return _update_user_db(file, db1s=db1s, - langchain_mode=langchain_mode, - **kwargs) - except BaseException as e: - print(traceback.format_exc(), flush=True) - # gradio has issues if except, so fail semi-gracefully, else would hang forever in processing textbox - ex_str = "Exception: %s" % str(e) - source_files_added = """\ - - -

-            <p>
-               Sources: <br>
-            </p>
-               <div style="overflow-y: auto;height:400px">
-                  {0}
-               </div>
        - - - """.format(ex_str) - doc_exception_text = str(e) - return None, langchain_mode, source_files_added, doc_exception_text, None - finally: - clear_torch_cache() - - -def get_lock_file(db1, langchain_mode): - db_id = get_dbid(db1) - base_path = 'locks' - base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True) - # don't allow db_id to be '' or None, would be bug and lock up everything - if not db_id: - if os.getenv('HARD_ASSERTS'): - raise ValueError("Invalid access for langchain_mode=%s" % langchain_mode) - db_id = str(uuid.uuid4()) - lock_file = os.path.join(base_path, "db_%s_%s.lock" % (langchain_mode.replace(' ', '_').replace('/', '_'), db_id)) - makedirs(os.path.dirname(lock_file)) # ensure really made - return lock_file - - -def _update_user_db(file, - db1s=None, - langchain_mode='UserData', - chunk=None, chunk_size=None, - - # urls - use_unstructured=True, - use_playwright=False, - use_selenium=False, - - # pdfs - use_pymupdf='auto', - use_unstructured_pdf='auto', - use_pypdf='auto', - enable_pdf_ocr='auto', - enable_pdf_doctr='auto', - try_pdf_as_html='auto', - - # images - enable_ocr=False, - enable_doctr=False, - enable_pix2struct=False, - enable_captions=True, - captions_model=None, - caption_loader=None, - doctr_loader=None, - pix2struct_loader=None, - - # json - jq_schema='.[]', - - dbs=None, db_type=None, - langchain_modes=None, - langchain_mode_paths=None, - langchain_mode_types=None, - use_openai_embedding=None, - hf_embedding_model=None, - migrate_embedding_model=None, - auto_migrate_db=None, - verbose=None, - n_jobs=-1, - is_url=None, is_txt=None, - ): - assert db1s is not None - assert chunk is not None - assert chunk_size is not None - assert use_openai_embedding is not None - assert hf_embedding_model is not None - assert migrate_embedding_model is not None - assert auto_migrate_db is not None - assert caption_loader is not None - assert doctr_loader is not None - assert enable_captions is not None - assert captions_model is not None - assert enable_ocr is not None - assert enable_doctr is not None - assert enable_pdf_ocr is not None - assert enable_pdf_doctr is not None - assert enable_pix2struct is not None - assert verbose is not None - - if dbs is None: - dbs = {} - assert isinstance(dbs, dict), "Wrong type for dbs: %s" % str(type(dbs)) - # handle case of list of temp buffer - if isinstance(file, str) and file.strip().startswith('['): - try: - file = ast.literal_eval(file.strip()) - except Exception as e: - print("Tried to parse %s as list but failed: %s" % (file, str(e)), flush=True) - if isinstance(file, list) and len(file) > 0 and hasattr(file[0], 'name'): - file = [x.name for x in file] - # handle single file of temp buffer - if hasattr(file, 'name'): - file = file.name - if not isinstance(file, (list, tuple, typing.Generator)) and isinstance(file, str): - file = [file] - - if langchain_mode == LangChainMode.DISABLED.value: - return None, langchain_mode, get_source_files(), "", None - - if langchain_mode in [LangChainMode.LLM.value]: - # then switch to MyData, so langchain_mode also becomes way to select where upload goes - # but default to mydata if nothing chosen, since safest - if LangChainMode.MY_DATA.value in langchain_modes: - langchain_mode = LangChainMode.MY_DATA.value - elif len(langchain_modes) >= 1: - langchain_mode = langchain_modes[0] - else: - return None, langchain_mode, get_source_files(), "", None - - if langchain_mode_paths is None: - langchain_mode_paths = {} - user_path = langchain_mode_paths.get(langchain_mode) - 
# UserData or custom, which has to be from user's disk - if user_path is not None: - # move temp files from gradio upload to stable location - for fili, fil in enumerate(file): - if isinstance(fil, str) and os.path.isfile(fil): # not url, text - new_fil = os.path.normpath(os.path.join(user_path, os.path.basename(fil))) - if os.path.normpath(os.path.abspath(fil)) != os.path.normpath(os.path.abspath(new_fil)): - if os.path.isfile(new_fil): - remove(new_fil) - try: - if os.path.dirname(new_fil): - makedirs(os.path.dirname(new_fil)) - shutil.move(fil, new_fil) - except FileExistsError: - pass - file[fili] = new_fil - - if verbose: - print("Adding %s" % file, flush=True) - - # FIXME: could avoid even parsing, let alone embedding, same old files if upload same file again - # FIXME: but assume nominally user isn't uploading all files over again from UI - - if is_txt and hf_embedding_model == 'fake': - # avoid parallel if fake embedding since assume trivial ingestion - n_jobs = 1 - - sources = path_to_docs(file if not is_url and not is_txt else None, - verbose=verbose, - fail_any_exception=False, - n_jobs=n_jobs, - chunk=chunk, chunk_size=chunk_size, - url=file if is_url else None, - text=file if is_txt else None, - - # urls - use_unstructured=use_unstructured, - use_playwright=use_playwright, - use_selenium=use_selenium, - - # pdfs - use_pymupdf=use_pymupdf, - use_unstructured_pdf=use_unstructured_pdf, - use_pypdf=use_pypdf, - enable_pdf_ocr=enable_pdf_ocr, - enable_pdf_doctr=enable_pdf_doctr, - try_pdf_as_html=try_pdf_as_html, - - # images - enable_ocr=enable_ocr, - enable_doctr=enable_doctr, - enable_pix2struct=enable_pix2struct, - enable_captions=enable_captions, - captions_model=captions_model, - caption_loader=caption_loader, - doctr_loader=doctr_loader, - pix2struct_loader=pix2struct_loader, - - # json - jq_schema=jq_schema, - - db_type=db_type, - ) - exceptions = [x for x in sources if x.metadata.get('exception')] - exceptions_strs = [x.metadata['exception'] for x in exceptions] - sources = [x for x in sources if 'exception' not in x.metadata] - - # below must at least come after langchain_mode is modified in case was LLM -> MyData, - # so original langchain mode changed - for k in db1s: - set_dbid(db1s[k]) - db1 = get_db1(db1s, langchain_mode) - - lock_file = get_lock_file(db1s[LangChainMode.MY_DATA.value], langchain_mode) # user-level lock, not db-level lock - with filelock.FileLock(lock_file): - if langchain_mode in db1s: - if db1[0] is not None: - # then add - db, num_new_sources, new_sources_metadata = add_to_db(db1[0], sources, db_type=db_type, - use_openai_embedding=use_openai_embedding, - hf_embedding_model=hf_embedding_model) - else: - # in testing expect: - # assert len(db1) == length_db1() and db1[1] is None, "Bad MyData db: %s" % db1 - # for production hit, when user gets clicky: - assert len(db1) == length_db1(), "Bad %s db: %s" % (langchain_mode, db1) - assert get_dbid(db1) is not None, "db hash was None, not allowed" - # then create - # if added has to original state and didn't change, then would be shared db for all users - langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value) - persist_directory, langchain_type = get_persist_directory(langchain_mode, db1s=db1s, dbs=dbs, - langchain_type=langchain_type) - langchain_mode_types[langchain_mode] = langchain_type - db = get_db(sources, use_openai_embedding=use_openai_embedding, - db_type=db_type, - persist_directory=persist_directory, - langchain_mode=langchain_mode, - 
langchain_mode_paths=langchain_mode_paths, - langchain_mode_types=langchain_mode_types, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - n_jobs=n_jobs) - if db is not None: - db1[0] = db - source_files_added = get_source_files(db=db1[0], exceptions=exceptions) - if len(sources) > 0: - sources_last = os.path.basename(sources[-1].metadata.get('source', 'Unknown Source')) - else: - sources_last = None - return None, langchain_mode, source_files_added, '\n'.join(exceptions_strs), sources_last - else: - langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value) - persist_directory, langchain_type = get_persist_directory(langchain_mode, db1s=db1s, dbs=dbs, - langchain_type=langchain_type) - langchain_mode_types[langchain_mode] = langchain_type - if langchain_mode in dbs and dbs[langchain_mode] is not None: - # then add - db, num_new_sources, new_sources_metadata = add_to_db(dbs[langchain_mode], sources, db_type=db_type, - use_openai_embedding=use_openai_embedding, - hf_embedding_model=hf_embedding_model) - else: - # then create. Or might just be that dbs is unfilled, then it will fill, then add - db = get_db(sources, use_openai_embedding=use_openai_embedding, - db_type=db_type, - persist_directory=persist_directory, - langchain_mode=langchain_mode, - langchain_mode_paths=langchain_mode_paths, - langchain_mode_types=langchain_mode_types, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - n_jobs=n_jobs) - dbs[langchain_mode] = db - # NOTE we do not return db, because function call always same code path - # return dbs[langchain_mode] - # db in this code path is updated in place - source_files_added = get_source_files(db=dbs[langchain_mode], exceptions=exceptions) - if len(sources) > 0: - sources_last = os.path.basename(sources[-1].metadata.get('source', 'Unknown Source')) - else: - sources_last = None - return None, langchain_mode, source_files_added, '\n'.join(exceptions_strs), sources_last - - -def get_source_files_given_langchain_mode(db1s, selection_docs_state1, requests_state1, document_choice1, - langchain_mode, - dbs=None, - load_db_if_exists=None, - db_type=None, - use_openai_embedding=None, - hf_embedding_model=None, - migrate_embedding_model=None, - auto_migrate_db=None, - verbose=False, - get_userid_auth=None, - delete_sources=False, - n_jobs=-1): - langchain_mode_paths = selection_docs_state1['langchain_mode_paths'] - langchain_mode_types = selection_docs_state1['langchain_mode_types'] - set_userid(db1s, requests_state1, get_userid_auth) - db = get_any_db(db1s, langchain_mode, langchain_mode_paths, langchain_mode_types, - dbs=dbs, - load_db_if_exists=load_db_if_exists, - db_type=db_type, - use_openai_embedding=use_openai_embedding, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - for_sources_list=True, - verbose=verbose, - n_jobs=n_jobs, - ) - if delete_sources: - del_from_db(db, document_choice1, db_type=db_type) - - if langchain_mode in ['LLM'] or db is None: - return "Sources: N/A" - return get_source_files(db=db, exceptions=None) - - -def get_source_files(db=None, exceptions=None, metadatas=None): - if exceptions is None: - exceptions = [] - - # only should be one source, not confused - # assert db is not None or metadatas is not None - # clicky user - if db is None and metadatas is None: - return "No Sources at all" - - 
if metadatas is None: - source_label = "Sources:" - if db is not None: - metadatas = get_metadatas(db) - else: - metadatas = [] - adding_new = False - else: - source_label = "New Sources:" - adding_new = True - - # below automatically de-dups - small_dict = {get_url(x['source'], from_str=True, short_name=True): get_short_name(x.get('head')) for x in - metadatas if x.get('page', 0) == 0} - # if small_dict is empty dict, that's ok - df = pd.DataFrame(small_dict.items(), columns=['source', 'head']) - df.index = df.index + 1 - df.index.name = 'index' - source_files_added = tabulate.tabulate(df, headers='keys', tablefmt='unsafehtml') - - if exceptions: - exception_metadatas = [x.metadata for x in exceptions] - small_dict = {get_url(x['source'], from_str=True, short_name=True): get_short_name(x.get('exception')) for x in - exception_metadatas} - # if small_dict is empty dict, that's ok - df = pd.DataFrame(small_dict.items(), columns=['source', 'exception']) - df.index = df.index + 1 - df.index.name = 'index' - exceptions_html = tabulate.tabulate(df, headers='keys', tablefmt='unsafehtml') - else: - exceptions_html = '' - - if metadatas and exceptions: - source_files_added = """\ - - -

-            <p>
-               {0} <br>
-            </p>
-               <div style="overflow-y: auto;height:400px">
-                  {1}
-                  {2}
-               </div>
        - - - """.format(source_label, source_files_added, exceptions_html) - elif metadatas: - source_files_added = """\ - - -

-            <p>
-               {0} <br>
-            </p>
-               <div style="overflow-y: auto;height:400px">
-                  {1}
-               </div>
        - - - """.format(source_label, source_files_added) - elif exceptions_html: - source_files_added = """\ - - -

-            <p>
-               Exceptions: <br>
-            </p>
-               <div style="overflow-y: auto;height:400px">
-                  {0}
-               </div>
        - - - """.format(exceptions_html) - else: - if adding_new: - source_files_added = "No New Sources" - else: - source_files_added = "No Sources" - - return source_files_added - - -def update_and_get_source_files_given_langchain_mode(db1s, - selection_docs_state, - requests_state, - langchain_mode, chunk, chunk_size, - - # urls - use_unstructured=True, - use_playwright=False, - use_selenium=False, - - # pdfs - use_pymupdf='auto', - use_unstructured_pdf='auto', - use_pypdf='auto', - enable_pdf_ocr='auto', - enable_pdf_doctr='auto', - try_pdf_as_html='auto', - - # images - enable_ocr=False, - enable_doctr=False, - enable_pix2struct=False, - enable_captions=True, - captions_model=None, - caption_loader=None, - doctr_loader=None, - pix2struct_loader=None, - - # json - jq_schema='.[]', - - dbs=None, first_para=None, - hf_embedding_model=None, - use_openai_embedding=None, - migrate_embedding_model=None, - auto_migrate_db=None, - text_limit=None, - db_type=None, load_db_if_exists=None, - n_jobs=None, verbose=None, get_userid_auth=None): - set_userid(db1s, requests_state, get_userid_auth) - assert hf_embedding_model is not None - assert migrate_embedding_model is not None - assert auto_migrate_db is not None - langchain_mode_paths = selection_docs_state['langchain_mode_paths'] - langchain_mode_types = selection_docs_state['langchain_mode_types'] - has_path = {k: v for k, v in langchain_mode_paths.items() if v} - if langchain_mode in [LangChainMode.LLM.value, LangChainMode.MY_DATA.value]: - # then assume user really meant UserData, to avoid extra clicks in UI, - # since others can't be on disk, except custom user modes, which they should then select to query it - if LangChainMode.USER_DATA.value in has_path: - langchain_mode = LangChainMode.USER_DATA.value - - db = get_any_db(db1s, langchain_mode, langchain_mode_paths, langchain_mode_types, - dbs=dbs, - load_db_if_exists=load_db_if_exists, - db_type=db_type, - use_openai_embedding=use_openai_embedding, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - for_sources_list=True, - verbose=verbose, - n_jobs=n_jobs, - ) - - # not designed for older way of using openai embeddings, why use_openai_embedding=False - # use_openai_embedding, hf_embedding_model passed in and possible different values used, - # but no longer used here or in calling functions so ok - db, num_new_sources, new_sources_metadata = make_db(use_openai_embedding=False, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - first_para=first_para, text_limit=text_limit, - chunk=chunk, - chunk_size=chunk_size, - - # urls - use_unstructured=use_unstructured, - use_playwright=use_playwright, - use_selenium=use_selenium, - - # pdfs - use_pymupdf=use_pymupdf, - use_unstructured_pdf=use_unstructured_pdf, - use_pypdf=use_pypdf, - enable_pdf_ocr=enable_pdf_ocr, - enable_pdf_doctr=enable_pdf_doctr, - try_pdf_as_html=try_pdf_as_html, - - # images - enable_ocr=enable_ocr, - enable_doctr=enable_doctr, - enable_pix2struct=enable_pix2struct, - enable_captions=enable_captions, - captions_model=captions_model, - caption_loader=caption_loader, - doctr_loader=doctr_loader, - pix2struct_loader=pix2struct_loader, - - # json - jq_schema=jq_schema, - - langchain_mode=langchain_mode, - langchain_mode_paths=langchain_mode_paths, - langchain_mode_types=langchain_mode_types, - db_type=db_type, - load_db_if_exists=load_db_if_exists, - db=db, - n_jobs=n_jobs, - 
verbose=verbose) - # during refreshing, might have "created" new db since not in dbs[] yet, so insert back just in case - # so even if persisted, not kept up-to-date with dbs memory - if langchain_mode in db1s: - db1s[langchain_mode][0] = db - else: - dbs[langchain_mode] = db - - # return only new sources with text saying such - return get_source_files(db=None, exceptions=None, metadatas=new_sources_metadata) - - -def get_db1(db1s, langchain_mode1): - if langchain_mode1 in db1s: - db1 = db1s[langchain_mode1] - else: - # indicates to code that not personal database - db1 = [None] * length_db1() - return db1 - - -def clean_doc(docs1): - if not isinstance(docs1, (list, tuple, types.GeneratorType)): - docs1 = [docs1] - for doci, doc in enumerate(docs1): - docs1[doci].page_content = '\n'.join([x.strip() for x in doc.page_content.split("\n") if x.strip()]) - return docs1 - - -def clone_documents(documents: Iterable[Document]) -> List[Document]: - # first clone documents - new_docs = [] - for doc in documents: - new_doc = Document(page_content=doc.page_content, metadata=copy.deepcopy(doc.metadata)) - new_docs.append(new_doc) - return new_docs - - -def get_db_from_hf(dest=".", db_dir='db_dir_DriverlessAI_docs.zip'): - from huggingface_hub import hf_hub_download - # True for case when locally already logged in with correct token, so don't have to set key - token = os.getenv('HUGGING_FACE_HUB_TOKEN', True) - path_to_zip_file = hf_hub_download('h2oai/db_dirs', db_dir, token=token, repo_type='dataset') - import zipfile - with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref: - persist_directory = os.path.dirname(zip_ref.namelist()[0]) - remove(persist_directory) - zip_ref.extractall(dest) - return path_to_zip_file - - -# Note dir has space in some cases, while zip does not -some_db_zips = [['db_dir_DriverlessAI_docs.zip', 'db_dir_DriverlessAI docs', 'CC-BY-NC license'], - ['db_dir_UserData.zip', 'db_dir_UserData', 'CC-BY license for ArXiv'], - ['db_dir_github_h2oGPT.zip', 'db_dir_github h2oGPT', 'ApacheV2 license'], - ['db_dir_wiki.zip', 'db_dir_wiki', 'CC-BY-SA Wikipedia license'], - # ['db_dir_wiki_full.zip', 'db_dir_wiki_full.zip', '23GB, 05/04/2023 CC-BY-SA Wiki license'], - ] - -all_db_zips = some_db_zips + \ - [['db_dir_wiki_full.zip', 'db_dir_wiki_full.zip', '23GB, 05/04/2023 CC-BY-SA Wiki license'], - ] - - -def get_some_dbs_from_hf(dest='.', db_zips=None): - if db_zips is None: - db_zips = some_db_zips - for db_dir, dir_expected, license1 in db_zips: - path_to_zip_file = get_db_from_hf(dest=dest, db_dir=db_dir) - assert os.path.isfile(path_to_zip_file), "Missing zip in %s" % path_to_zip_file - if dir_expected: - assert os.path.isdir(os.path.join(dest, dir_expected)), "Missing path for %s" % dir_expected - assert os.path.isdir( - os.path.join(dest, dir_expected, 'index')), "Missing index in %s" % dir_expected - - -def _create_local_weaviate_client(): - WEAVIATE_URL = os.getenv('WEAVIATE_URL', "http://localhost:8080") - WEAVIATE_USERNAME = os.getenv('WEAVIATE_USERNAME') - WEAVIATE_PASSWORD = os.getenv('WEAVIATE_PASSWORD') - WEAVIATE_SCOPE = os.getenv('WEAVIATE_SCOPE', "offline_access") - - resource_owner_config = None - try: - import weaviate - from weaviate.embedded import EmbeddedOptions - if WEAVIATE_USERNAME is not None and WEAVIATE_PASSWORD is not None: - resource_owner_config = weaviate.AuthClientPassword( - username=WEAVIATE_USERNAME, - password=WEAVIATE_PASSWORD, - scope=WEAVIATE_SCOPE - ) - - # if using remote server, don't choose persistent directory - client = 
weaviate.Client(WEAVIATE_URL, auth_client_secret=resource_owner_config) - return client - except Exception as e: - print(f"Failed to create Weaviate client: {e}") - return None - - -if __name__ == '__main__': - pass diff --git a/spaces/awacke1/DnD-Character-Sheet2/app.py b/spaces/awacke1/DnD-Character-Sheet2/app.py deleted file mode 100644 index f84a2c979c8db06eddaade80d40dd9c565677563..0000000000000000000000000000000000000000 --- a/spaces/awacke1/DnD-Character-Sheet2/app.py +++ /dev/null @@ -1,71 +0,0 @@ -import streamlit as st -import random - -class DNDCharacter: - def __init__(self, name, char_class, level, race, background, alignment): - self.name = name - self.char_class = char_class - self.level = level - self.race = race - self.background = background - self.alignment = alignment - self.stats = { - "Strength": 0, - "Dexterity": 0, - "Wisdom": 0, - "Charisma": 0, - "Constitution": 0, - "Intelligence": 0 - } - self.hit_points = 0 - self.armor_class = 0 - self.initiative = 0 - self.speed = 0 - self.proficiencies = [] - self.equipment = [] - self.features_and_traits = [] - self.spells = [] - - def roll_stats(self): - for stat in self.stats: - self.stats[stat] = random.randint(1, 20) - - def show_stats(self): - st.write("Strength: ", self.stats["Strength"]) - st.write("Dexterity: ", self.stats["Dexterity"]) - st.write("Wisdom: ", self.stats["Wisdom"]) - st.write("Charisma: ", self.stats["Charisma"]) - st.write("Constitution: ", self.stats["Constitution"]) - st.write("Intelligence: ", self.stats["Intelligence"]) - - def show_character_sheet(self): - st.write("Name: ", self.name) - st.write("Class: ", self.char_class) - st.write("Level: ", self.level) - st.write("Race: ", self.race) - st.write("Background: ", self.background) - st.write("Alignment: ", self.alignment) - self.show_stats() - st.write("Hit Points: ", self.hit_points) - st.write("Armor Class: ", self.armor_class) - st.write("Initiative: ", self.initiative) - st.write("Speed: ", self.speed) - st.write("Proficiencies: ", self.proficiencies) - st.write("Equipment: ", self.equipment) - st.write("Features and Traits: ", self.features_and_traits) - st.write("Spells: ", self.spells) - -# Streamlit app -st.title("D&D Character Sheet") - -name = st.text_input("Name") -char_class = st.text_input("Class") -level = st.number_input("Level", min_value=1, max_value=20, value=1) -race = st.text_input("Race") -background = st.text_input("Background") -alignment = st.text_input("Alignment") - -if st.button("Roll Stats"): - character = DNDCharacter(name, char_class, level, race, background, alignment) - character.roll_stats() - character.show_character_sheet() diff --git a/spaces/awacke1/Streamlit-AI-Letter-UI/README.md b/spaces/awacke1/Streamlit-AI-Letter-UI/README.md deleted file mode 100644 index 96db44b3e3ea3b583f0a89e6f2b0068445553a51..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Streamlit-AI-Letter-UI/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Streamlit AI Letter UI -emoji: 📉MD📉 -colorFrom: pink -colorTo: pink -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/VideoFromImage/share_btn.py b/spaces/awacke1/VideoFromImage/share_btn.py deleted file mode 100644 index 52a200db1e71b0b5655bb7b61e4046baf945a224..0000000000000000000000000000000000000000 --- a/spaces/awacke1/VideoFromImage/share_btn.py +++ /dev/null @@ -1,85 +0,0 @@ -community_icon_html = 
"""""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - - async function getInputImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const isPng = imgEl.src.startsWith(`data:image/png`); - if(isPng){ - const fileName = `sd-perception-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - }else{ - const fileName = `sd-perception-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - } - } - - async function getVideoBlobFile(videoEL){ - const res = await fetch(videoEL.src); - const blob = await res.blob(); - const videoId = Date.now() % 200; - const fileName = `ms-image2video-${{videoId}}.mp4`; - const videoBlob = new File([blob], fileName, { type: 'video/mp4' }); - console.log(videoBlob); - return videoBlob; - } - - const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app'); - const inputImgEl = gradioEl.querySelector('#image-in img'); - const outputVideo = gradioEl.querySelector('#video-out video'); - - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!outputVideo){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - const inputFile = await getInputImgFile(inputImgEl); - const urlInputImg = await uploadFile(inputFile); - const videoOutFile = await getVideoBlobFile(outputVideo); - const dataOutputVid = await uploadFile(videoOutFile); - - const descriptionMd = ` -#### Image init: - - -#### MS Image2Video result: -${dataOutputVid} -`; - const params = new URLSearchParams({ - title: "Please provide a title :)", - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/fffiloni/MS-Image2Video/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/vr/deprecated/GearVRController.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/vr/deprecated/GearVRController.js deleted file mode 100644 index bda42560a9d739ff7bd10b0099f7637d9e1b5b05..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/vr/deprecated/GearVRController.js +++ /dev/null @@ -1,132 +0,0 @@ -/** - * @author servinlp - */ - -THREE.GearVRController = function () { - - THREE.Object3D.call( this ); - - var scope = this; - var gamepad; - - var axes = [ 0, 0 ]; - var touchpadIsPressed = false; - var triggerIsPressed = false; - var angularVelocity = new THREE.Vector3(); - - this.matrixAutoUpdate = true; - - function findGamepad() { - - var gamepads = navigator.getGamepads && navigator.getGamepads(); - - for ( var i = 0; i < 4; i ++ ) { - - var gamepad = gamepads[ i ]; - - if ( gamepad && ( gamepad.id === 'Gear VR 
Controller' || gamepad.id === 'Oculus Go Controller' ) ) { - - return gamepad; - - } - - } - - } - - this.getGamepad = function () { - - return gamepad; - - }; - - this.getTouchpadState = function () { - - return touchpadIsPressed; - - }; - - this.update = function () { - - gamepad = findGamepad(); - - if ( gamepad !== undefined && gamepad.pose !== undefined ) { - - var pose = gamepad.pose; - - if ( pose === null ) return; // no user action yet - - // orientation - - if ( pose.orientation !== null ) scope.quaternion.fromArray( pose.orientation ); - - scope.updateMatrix(); - scope.visible = true; - - // angular velocity - - if ( pose.angularVelocity !== null && ! angularVelocity.equals( pose.angularVelocity ) ) { - - angularVelocity.fromArray( pose.angularVelocity ); - scope.dispatchEvent( { type: 'angularvelocitychanged', angularVelocity: angularVelocity } ); - - } - - // axes (touchpad) - - if ( axes[ 0 ] !== gamepad.axes[ 0 ] || axes[ 1 ] !== gamepad.axes[ 1 ] ) { - - axes[ 0 ] = gamepad.axes[ 0 ]; - axes[ 1 ] = gamepad.axes[ 1 ]; - scope.dispatchEvent( { type: 'axischanged', axes: axes } ); - - } - - // button (touchpad) - - if ( touchpadIsPressed !== gamepad.buttons[ 0 ].pressed ) { - - touchpadIsPressed = gamepad.buttons[ 0 ].pressed; - scope.dispatchEvent( { type: touchpadIsPressed ? 'touchpaddown' : 'touchpadup', axes: axes } ); - - } - - - // trigger - - if ( triggerIsPressed !== gamepad.buttons[ 1 ].pressed ) { - - triggerIsPressed = gamepad.buttons[ 1 ].pressed; - scope.dispatchEvent( { type: triggerIsPressed ? 'triggerdown' : 'triggerup' } ); - - } - - // app button not available, reserved for use by the browser - - } else { - - scope.visible = false; - - } - - }; - - // DEPRECATED - - this.getTouchPadState = function () { - - console.warn( 'THREE.GearVRController: getTouchPadState() is now getTouchpadState()' ); - return touchpadIsPressed; - - }; - - this.setHand = function () { - - console.warn( 'THREE.GearVRController: setHand() has been removed.' ); - - }; - -}; - -THREE.GearVRController.prototype = Object.create( THREE.Object3D.prototype ); -THREE.GearVRController.prototype.constructor = THREE.GearVRController; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/animation/tracks/NumberKeyframeTrack.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/animation/tracks/NumberKeyframeTrack.d.ts deleted file mode 100644 index 8bb54fdd9bb210544bf7910b10d46e7c3a43ff68..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/animation/tracks/NumberKeyframeTrack.d.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { KeyframeTrack } from './../KeyframeTrack'; -import { InterpolationModes } from '../../constants'; - -export class NumberKeyframeTrack extends KeyframeTrack { - constructor( - name: string, - times: any[], - values: any[], - interpolation?: InterpolationModes - ); -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/animation/tracks/NumberKeyframeTrack.js b/spaces/banana-projects/web3d/node_modules/three/src/animation/tracks/NumberKeyframeTrack.js deleted file mode 100644 index f717221998c890dda785d3234705e92f3c4fa837..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/animation/tracks/NumberKeyframeTrack.js +++ /dev/null @@ -1,30 +0,0 @@ -import { KeyframeTrack } from '../KeyframeTrack.js'; - -/** - * - * A Track of numeric keyframe values. 
- * - * @author Ben Houston / http://clara.io/ - * @author David Sarno / http://lighthaus.us/ - * @author tschw - */ - -function NumberKeyframeTrack( name, times, values, interpolation ) { - - KeyframeTrack.call( this, name, times, values, interpolation ); - -} - -NumberKeyframeTrack.prototype = Object.assign( Object.create( KeyframeTrack.prototype ), { - - constructor: NumberKeyframeTrack, - - ValueTypeName: 'number' - - // ValueBufferType is inherited - - // DefaultInterpolation is inherited - -} ); - -export { NumberKeyframeTrack }; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/premultiplied_alpha_fragment.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/premultiplied_alpha_fragment.glsl.js deleted file mode 100644 index a183739943b958ed7fa5c51eae3ae696499444f9..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/premultiplied_alpha_fragment.glsl.js +++ /dev/null @@ -1,8 +0,0 @@ -export default /* glsl */` -#ifdef PREMULTIPLIED_ALPHA - - // Get get normal blending with premultipled, use with CustomBlending, OneFactor, OneMinusSrcAlphaFactor, AddEquation. - gl_FragColor.rgb *= gl_FragColor.a; - -#endif -`; diff --git a/spaces/betheredge/air-vibrations/spaces_info.py b/spaces/betheredge/air-vibrations/spaces_info.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/better57/CHATGPT/readme/README_ja.md b/spaces/better57/CHATGPT/readme/README_ja.md deleted file mode 100644 index fc56eec0b81c22ff0a49e3960aa52ffd7d6dc5cb..0000000000000000000000000000000000000000 --- a/spaces/better57/CHATGPT/readme/README_ja.md +++ /dev/null @@ -1,126 +0,0 @@ -
        - - 简体中文 | English | 日本語 -
        - -

        川虎 Chat 🐯 Chuanhu Chat

        -
        - - Logo - - -

        -

        ChatGPT/ChatGLM/LLaMAなどのLLMのための軽量でユーザーフレンドリーなWeb-UI

        -

        - - Tests Passing - - - GitHub Contributors - - - GitHub pull requests - -

        - ストリーム出力/会話回数無制限/履歴保存/プリセットプロンプト/ファイルへの質問チャット
        - ウェブ検索/LaTeXレンダリング/表レンダリング/コードハイライト
        - オートダークモード/アダプティブ・ウェブ・インターフェイス/WeChatライク・テーマ
        - マルチパラメーターチューニング/マルチAPI-Key対応/マルチユーザー対応
        - GPT-4対応/LLMのローカルデプロイ可能。 -

        - 動画チュートリアル - · - 2.0 イントロダクション - · - 3.0 イントロダクション & チュートリアル - || - オンライントライアル - · - ワンクリックデプロイ -

        -

        - Animation Demo -

        -

        -
        - -## 使う上でのTips - -- ChatGPTをより適切に制御するために、システムプロンプトを使用できます。 -- プロンプトテンプレートを使用するには、プロンプトテンプレートコレクションを選択し、ドロップダウンメニューから特定のプロンプトを選択。回答が不十分な場合は、`🔄再生成`ボタンを使って再試行します。 -- 入力ボックスで改行するには、Shift + Enterキーを押してください。 -- 入力履歴を素早く切り替えるには、入力ボックスで キーを押す。 -- プログラムをサーバにデプロイするには、プログラムの最終行を `demo.launch(server_name="0.0.0.0", server_port=)`に変更します。 -- 共有リンクを取得するには、プログラムの最後の行を `demo.launch(share=True)` に変更してください。なお、公開リンクでアクセスするためには、プログラムが実行されている必要があることに注意してください。 -- Hugging Face Spacesで使用する場合: より速く、より安全に利用するために、**Duplicate Space**を使用し、自分のスペースでプログラムを実行することをお勧めします。 - -## インストール - -```shell -git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git -cd ChuanhuChatGPT -pip install -r requirements.txt -``` - -次に `config_example.json`をコピーして `config.json`にリネームし、そのファイルにAPI-Keyなどの設定を記入する。 - -```shell -python ChuanhuChatbot.py -``` - -ブラウザのウィンドウが開き、ChatGPTとチャットできるようになります。 - -> **Note** -> -> 詳しい手順は[wikiページ](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程)をご確認ください。 - -## トラブルシューティング - -問題が発生した場合は、まずこのプロジェクトの最新の変更点を手動で引っ張ってみるのがよいでしょう。その手順は以下の通りです: - -1. ウェブページの `Download ZIP` をクリックして最新のコードアーカイブをダウンロードするか、または - ```shell - git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f - ``` -2. 新しい依存関係が導入されている可能性があるため、依存関係を再度インストールしてみてください。 - ``` - pip install -r requirements.txt - ``` -3. Gradioを更新 - ``` - pip install gradio --upgrade --force-reinstall - ``` - -一般的に、以下の手順でほとんどの問題を解決することができます。 - -それでも問題が解決しない場合は、こちらのページをご参照ください: [よくある質問(FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) - -このページでは、考えられるほぼすべての問題点と解決策を掲載しています。よくお読みください。 - -## More Information - -より詳細な情報は、[wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki) をご覧ください。: - -- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization) -- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南) -- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目) -- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志) -- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可) - -## Starchart - -[![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date) - -## Contributors - - - - - -## Sponsor - -🐯 この企画が役に立ったら、遠慮なくコーラかコーヒーでもおごってください〜。 - -Buy Me A Coffee - -image diff --git a/spaces/bigjoker/stable-diffusion-webui/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/spaces/bigjoker/stable-diffusion-webui/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js deleted file mode 100644 index 4a85c8ebf25110e911a6a1021fae6a014aa11000..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js +++ /dev/null @@ -1,110 +0,0 @@ -// Stable Diffusion WebUI - Bracket checker -// Version 1.0 -// By Hingashi no Florin/Bwin4L -// Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs. -// If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong. - -function checkBrackets(evt, textArea, counterElt) { - errorStringParen = '(...) - Different number of opening and closing parentheses detected.\n'; - errorStringSquare = '[...] 
- Different number of opening and closing square brackets detected.\n'; - errorStringCurly = '{...} - Different number of opening and closing curly brackets detected.\n'; - - openBracketRegExp = /\(/g; - closeBracketRegExp = /\)/g; - - openSquareBracketRegExp = /\[/g; - closeSquareBracketRegExp = /\]/g; - - openCurlyBracketRegExp = /\{/g; - closeCurlyBracketRegExp = /\}/g; - - totalOpenBracketMatches = 0; - totalCloseBracketMatches = 0; - totalOpenSquareBracketMatches = 0; - totalCloseSquareBracketMatches = 0; - totalOpenCurlyBracketMatches = 0; - totalCloseCurlyBracketMatches = 0; - - openBracketMatches = textArea.value.match(openBracketRegExp); - if(openBracketMatches) { - totalOpenBracketMatches = openBracketMatches.length; - } - - closeBracketMatches = textArea.value.match(closeBracketRegExp); - if(closeBracketMatches) { - totalCloseBracketMatches = closeBracketMatches.length; - } - - openSquareBracketMatches = textArea.value.match(openSquareBracketRegExp); - if(openSquareBracketMatches) { - totalOpenSquareBracketMatches = openSquareBracketMatches.length; - } - - closeSquareBracketMatches = textArea.value.match(closeSquareBracketRegExp); - if(closeSquareBracketMatches) { - totalCloseSquareBracketMatches = closeSquareBracketMatches.length; - } - - openCurlyBracketMatches = textArea.value.match(openCurlyBracketRegExp); - if(openCurlyBracketMatches) { - totalOpenCurlyBracketMatches = openCurlyBracketMatches.length; - } - - closeCurlyBracketMatches = textArea.value.match(closeCurlyBracketRegExp); - if(closeCurlyBracketMatches) { - totalCloseCurlyBracketMatches = closeCurlyBracketMatches.length; - } - - if(totalOpenBracketMatches != totalCloseBracketMatches) { - if(!counterElt.title.includes(errorStringParen)) { - counterElt.title += errorStringParen; - } - } else { - counterElt.title = counterElt.title.replace(errorStringParen, ''); - } - - if(totalOpenSquareBracketMatches != totalCloseSquareBracketMatches) { - if(!counterElt.title.includes(errorStringSquare)) { - counterElt.title += errorStringSquare; - } - } else { - counterElt.title = counterElt.title.replace(errorStringSquare, ''); - } - - if(totalOpenCurlyBracketMatches != totalCloseCurlyBracketMatches) { - if(!counterElt.title.includes(errorStringCurly)) { - counterElt.title += errorStringCurly; - } - } else { - counterElt.title = counterElt.title.replace(errorStringCurly, ''); - } - - if(counterElt.title != '') { - counterElt.classList.add('error'); - } else { - counterElt.classList.remove('error'); - } -} - -function setupBracketChecking(id_prompt, id_counter){ - var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea"); - var counter = gradioApp().getElementById(id_counter) - textarea.addEventListener("input", function(evt){ - checkBrackets(evt, textarea, counter) - }); -} - -var shadowRootLoaded = setInterval(function() { - var shadowRoot = document.querySelector('gradio-app').shadowRoot; - if(! 
shadowRoot) return false; - - var shadowTextArea = shadowRoot.querySelectorAll('#txt2img_prompt > label > textarea'); - if(shadowTextArea.length < 1) return false; - - clearInterval(shadowRootLoaded); - - setupBracketChecking('txt2img_prompt', 'txt2img_token_counter') - setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter') - setupBracketChecking('img2img_prompt', 'imgimg_token_counter') - setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter') -}, 1000); diff --git a/spaces/bioriAsaeru/text-to-voice/Antiwpa 2.0 1 Winxp 2k3 Zip.md b/spaces/bioriAsaeru/text-to-voice/Antiwpa 2.0 1 Winxp 2k3 Zip.md deleted file mode 100644 index 8f111ac2d65e550c952a6be2712572ceb27d2fa9..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Antiwpa 2.0 1 Winxp 2k3 Zip.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Antiwpa 2.0 1 Winxp 2k3 Zip


        DOWNLOAD ✺✺✺ https://urloso.com/2uyOWE



        -
        -Re: great site [1] 名前:Micheallal Moore :2006/11/07 (火) 19:23 No.4345 ... 755 cm (c3198a) by win xp.x, &esrc.s&source.web&cd.6&sqi.2&ved.0ciabebywbq&url, ... tx1320us tips, englis, c6150 compatib, dv8000t 2.0, dv9040ea enterta, 300gb, ... zip, sg3, m7747c.b core™2 e430, pilôtes integrate, provide solution.support, ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/bioriAsaeru/text-to-voice/Battle Royale English Version Full Movieinstmankl The Best Scenes and Quotes from the Movie.md b/spaces/bioriAsaeru/text-to-voice/Battle Royale English Version Full Movieinstmankl The Best Scenes and Quotes from the Movie.md deleted file mode 100644 index 3da294daadab8dcf608fdcf69cacb4c789f90449..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Battle Royale English Version Full Movieinstmankl The Best Scenes and Quotes from the Movie.md +++ /dev/null @@ -1,10 +0,0 @@ -
        -

        fb6c851797 -link-half-life-1-original-game-hack-online
        -work-ease-audio-converter-4-80-serial-upd-crack
        -link-realtime-landscaping-architect-2016-for-mac
        -character-generator-2018-crack-xforce-32-fulbrsel
        -__exclusive__-link-windows-8-1-black-alien-edition-x64-2015-by-kirk-full-version
        -2011-terjemah-kitab-khozinatul-asror-pdfl
        -download-cleo-cheats-gta-sa-android-faustraq
        -commandos2destinationparisnocdcrack-richmlauda
        -great-grand-masti-hd-movie-download-in-kickass-install
        -barcode-generator-and-overprinter-v6-6-10-crack-darytad
        -premam-malayalam-movie-full-hd-download-top
        -wurth-wow-serial-keygen-narelhanse
        -aktivasi-winrar-menjadi-full-version-gratis-full
        -top-nil-battey-sannata-malayalam-movie-mp4-download
        -must-be-love-full-movie-free-download-fonzeche
        -protel-dxp-2004-for-windows-7
        -jenny-frosh-serious-moonlight-by-kami-tora-pdf-5-extra-quality
        -bp-hasdeu-perit-au-dacii-pdf-top
        -runtime-getdataback-for-fat-or-ntfs-4-22-keygen-rar-adds-1-39-extra-quality
        -poker-tournament-supervisor-2-crack-16-gauntamr
        -rar-password-crack-verifieder-batch-file
        -honeywell-care-703rar-bethger
        -wifi-password-hack-v5-download-mideafire-hanvenit
        -prezi-next-pro-1-6-3-crack-leshnocon
        -patched-internetspyhunter3-0h-by-themaster101-install
        -crack-adobe-photoshop-cc-2018-v19-1-5-x86-x64-multilingual-update-hazeselesf-briejayl
        -js-support-ticket-pro-nulled-xenforo-dawleama
        -sims2-1-rip-mdf-free-download-maksmist
        -_hot_-movie-magic-scheduling-for-mac
        -ramit-sethi-dream-job-resume-webinar-torrent-fix

        -

        fb6c851797 -wrong-turn-6-mp4-movie-free-download-full
        -cracked-adobe-cc-2018-patcher
        -vista-x64-build-5600-rc1-dvd-iso-rar
        -link-smucantikdiperkosadenganpaksa3gp
        -or-version-integrale-remasterise-mkv-sammevayle
        -dil-to-pagal-hai-1997-hindi-brrip-720p-x264-aac-5-1-hon3y-darstaddi
        -skylum-luminar-3-v3-0-1-1610-install-crack
        -cracked-sarah-mayberry-the-best-laid-plans-epub-download
        -hawx-level-40-pc-save-game
        -best-contoh-soal-tes-psikotes-bank-bri
        -_best_-loveyatri-2018-flac
        -2021-microsoft-office-2016-vl-brazilian-language-pack-x86-x64-serial-key
        -archicad-17-download-full-version-fixed
        -anytoiso-professional-v3-9-0-patch-sadgon
        -episode-4-50-tamil-dubbed-movie-torrent
        -lili-inventa-o-mundo-pdf-download-cordarin
        -artcam-2011-portugues-rar
        -wp-auctions-pro-nulled-14-top
        -bibcam-10-yo-mpg
        -hot-kunci-jawaban-lks-ekonomi-kelas-x-intan-pariwara-better
        -pt-asia-asian-pt-series-rapidshare-link-full
        -paan-singh-tomar-mp4-movie-hd-free-download-free
        -top-rated-drive-es-pcs7-v7-1-sp1-chandvalar
        -sony-vegas-6-0a-keygen-free-download-exclusive
        -__full__-paul-hardcastle-jazzmasters-smooth-cuts-full-album-zip
        -dungeons-of-dredmor-v1-0-11-2-dlc-theta-hack-pc
        -top-ap-928-inpo-pdf-download-view
        -saint-seiya-lost-canvas-bdrip-1080p-full-top-hdl
        -fsx-p3d-p3dv2-fs2crew-aerosoft-airbus-x-voice-control-v-2-2-skidrow-marghar
        -aerofly-rc-7-cracked-rib-upd

        -

        Battle Royale English Version Full Movieinstmankl


        Download File ☆☆☆ https://urloso.com/2uyQck



        -

        fb6c851797 -xforce-keygen-64-bit-autocad-architecture-2005-activation-patched
        -saraswatichandra-story-in-hindi-pdf-238-__link__
        -4k-video-downloader-4-4-8-2317-crack-keygen-license-key-fix
        -garam-full-movies-720p-top
        -top-microsoft-excel-2016-16-13-1-crack-macos-macosx
        -photoshop-cc-free-download-full-version-with-crack-mac-free
        -archivo-amtlib-dll-illustrator-cc-crack-extra-quality
        -microsoft-office-2013-professional-plus-x64-slovak-msdn-rar-gayeldarla
        -deepl-pro-1-11-0-portable-nedavbridg
        -2021-ik-multimedia-amplitube-4-complete-v4-9-0
        -cracked-tnt-village-divx-ita-bad-taste-peter-jackson-dvdrip
        -work-principles-of-power-system-by-v-k-mehta-solution-manual-126
        -amourangels-mari-creamy
        -katya-y111-topless-cstm-2007-06-13-102-pics-patched
        -36-chambers-of-shaolin-full-movie-in-hindi-720p-98-_top_
        -cukur-3-sezon-20-bolum-indir-87-bolum-1080p
        -eplan-p8-2-0-validation-codel
        -simple-scan-pro-pdf-scanner-v3-7-cracked-latest-upd
        -desi-kattey-hd-1080p-full-movie-download-full
        -_best_-m83-midnight-city-320-kbps-downloadl
        -topaz-sharpen-ai-1-4-0-x64-top
        -malwarebytes-anti-malware-serial
        -download-detective-conan-episodes-in-hindi-free-talikderi
        -wavefunction-spartan-08-v1-2-cracked-eat-full-version-cracked
        -crack-4k-video-downloader-3-4-0-1400-crack-preactivated-raimufree
        -__full__-bullett-raja-kannada-movie-free-download-hd
        -lcd-font-maker-v3-92-crackl-best
        -hotspot-shield-vpn-elite-10-22-51-multilingual-patch-rar-tamashell
        -data-mining-and-data-warehousing-by-bharat-bhushan-agarwal-sumit-prakash-tayal-rar
        -patched-portable-ssdlife-pro-v2-5-82-te-khaquig

        -

        fb6c851797 -me-365-homework-solutions-repack
        -james-bond-007-bloodstone-keygen-pc-reedelmy
        -__link__-zuken-e3-series-crack
        -diskinternals-partition-recovery-full-version-free-download-likhamil
        -_hot_-sileo-dose-in-cats
        -repack-mp4-hindi-dubbed-bin-roye-pakistani
        -tecdoc-online-free-_hot_
        -2021-flat-out-matt-free-epub-download
        -gd0184-jenni-and-kendra-growth-games-hdmp4-naetfrid
        -nfs-hot-pursuit-serial-number-for-activationl-darnorth
        -chak-de-india-movie-free-link-download-in-hindi
        -betaab-1983-flac-yolapant
        -izotope-nectar-p-s-2-00-516-x86-x64-kiwipirate-download-2021
        -the-smurfs-2011-dublat-romana-vanisfide
        -singhamreturns-new-fullmoviehddownloadutorrentfree
        -hd-fantastic-beasts-and-where-to-find-them-english-download-new
        -mico-service-information-v2-with-hot-crack-torrent
        -kitab-sirah-nabawiyah-ibnu-hisyam-pdf-download-camion-riley-punti-v-better
        -better-psp-vintage-warmer-free-download-mac
        -carol-burnett-theme-song-download-better-l
        -full-presonus-studio-one-3-professional-v3-3-4-keygen-plugins-full
        -driver-setup-ilok-64-bit-download-new
        -easy-recovery-pro-v-6-04-with-serial-number-rar-rar
        -new-serial-juego-pc-csi-la-conspiracion
        -beware-of-bios-update-0373-for-intel-nucs
        -net-monitor-for-employees-professional-4-9-1-crack-keylhel
        -laboratory-qc-software-free-exclusive-download
        -free-download-ccproxy-7-2-crack-maryjar
        -powtoon-software-crack-download-top
        -fastgsm-bcm-10042full-cracked-rar

        -

        fb6c851797 -panda-antivirus-pro-pre-activated-full-version-reagera
        -powermill-2019-crack-torrent-hot
        -natpukkaga-full-movie-hd-1080p-free
        -digital-signal-processing-books-oppenheim-pdf-free-download-zip-shadelsa
        -vector-magic-1-17-1-__full__
        -new-vero-visi-v20-0-15
        -icartech-aurora-2-update-zip-garrwalda
        -hot-download-film-hot-indonesia-tahun-1990-129311
        -buku-keperawatan-jiwa-pdf
        -ghost-11-5-iso-free-55l-exclusive
        -windows-loader-v2-2-3-by-daz-setup-free-exclusive
        -for-macos-crack-download-2018-transmitter-controller-state-machine-torrentdownloads-yzf-dfq-wasgarne
        -interna-medicina-vrhovac-pdf-free-11-full-best
        -watch-xxx-1080p-cheigray
        -unholydisasterfullcrack-better-hack
        -updated-astro-vision-lifesign-with-parihara-125-full-version
        -ffhc-rebirth-3-1-full-palmarl
        -download-ebook-boyman-ragam-latih-pramuka-penggalang-install
        -the-angry-birds-movie-english-3-movie-link-download-hd-mp4
        -garmin-mapsource-worldmap-v4-rar-link
        -cm-03-04-top-crack-chomikuj
        -top-vajvito-pava-to-krishna-murari-mp3
        -mp3doctor-pro-2-portable-2021
        -hot-frivolous-dress-order-the-chapters
        -link-crack-irender-nxt-for-sketchup-8-free-rar
        -patched-no-direction-home-bob-dylan-dvdrip-torrentl
        -naan-2012-lotus-dvd-rip-1cd-tamil-movie-download-top-avi
        -campbell-biology-9th-edition-pdf-bahasa-indonesia-best
        -coleccion-revista-saber-electronica-pdf-downloadl-exclusive
        -tmpgenc-authoring-works-51155-upd

        -

        fb6c851797 -viking-saga-3-epic-adventure-final-2014-pc-foxy-games-vip-hack
        -crack-php-pro-bid-v6-rar-deljai
        -ncss-pass-8-0-13-torrent-__hot__
        -the-amorous-sisters-english-subtitles
        -pinegrow-web-designer-2-91-crack-levtany
        -install-chvrches-the-bones-of-what-you-b
        -coh-tales-of-valor-2-500-crack-elgyfrank
        -exclusive-desi-kattey-part-3-full-movie-download-in-hindi
        -fixed-forza-motorsport-4-pc-download-completo
        -valya-36-18m37s-pthc-valya
        -fundamentals-of-renewable-energy-processes-pdf-free-download-nealmel
        -repack-download-film-indo-jadul-semil
        -link-snapgene-4-2-2-crack-with-activation-code-for-mac-win
        -of-mice-and-men-pdf-full-book-free-download-briaolw
        -tamil-actress-banupriya-nude-boobs-pictures-janelpar
        -portable-la-belle-captive-1983-dvdrip
        -baankey-ki-crazy-baraat-eng-sub-full-720p-hd-movie-best
        -crack-stellar-phoenix-mailbox-exchange-recovery-5-0-0-0-best
        -sam-naprawiam-renault-clio-ii-1-5-dci-pdf-economia-with-tres-identificando-so-_verified_
        -software-project-management-bob-hughes-mike-cotterell-4th-edition-tata-mcgraw-hill-2006-spalt-justin
        -gadmei-usb-tv-stick-utv382e-driver-download-free
        -tutorial-portable-with-nsis-kiahllaw
        -ergosoft-poster-print-v1007-dongle-crack-link-ed
        -icon-cube-4-nano-driver-zip-work
        -x-force-formit-2014-keygen-downloader-__full__
        -datacash230download-_best_-sins-2005-dvdrip-xvid-lkrg-torrent-28
        -top-prosicar-bar-restaurante-51-20
        -waitrose-font-better
        -full-the-avengers-age-of-ultron-full-movie-download-utorrent
        -raqt-ek-rishta-full-movie-in-hindi-free-download-3gp-movies-hierogear

        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/bla/tranny/App/UserTranscriptions/Model.py b/spaces/bla/tranny/App/UserTranscriptions/Model.py deleted file mode 100644 index 9b9a6a557f6096c56e430db106571f90301ce68c..0000000000000000000000000000000000000000 --- a/spaces/bla/tranny/App/UserTranscriptions/Model.py +++ /dev/null @@ -1,20 +0,0 @@ -import orm -import datetime -from App.modelInit import database, models -from App.Users.Model import User - - -class UserTranscriptions(orm.Model): - tablename = "userTranscriptions" - registry = models - fields = { - "id": orm.Integer(primary_key=True), - "fileName": orm.String(max_length=100, index=True), - "youtubeLink": orm.String(max_length=100, index=True, allow_null=True), - "taskId": orm.String(max_length=100, index=True), - "telegramId": orm.String(max_length=100, index=True, allow_null=True), - "user": orm.ForeignKey(User), - "createdAt": orm.DateTime(index=True, default=datetime.datetime.now), - "updatedAt": orm.DateTime(index=True, default=datetime.datetime.now), - "lastLogin": orm.DateTime(index=True, default=datetime.datetime.now), - } diff --git a/spaces/bmhk/xiaobai/README.md b/spaces/bmhk/xiaobai/README.md deleted file mode 100644 index f7a3487d841b3456e43d7b5b25c05513ec714685..0000000000000000000000000000000000000000 --- a/spaces/bmhk/xiaobai/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Xiaobai -emoji: 🌍 -colorFrom: green -colorTo: purple -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/GETTING_STARTED.md b/spaces/brjathu/HMR2.0/vendor/detectron2/GETTING_STARTED.md deleted file mode 100644 index 404b0c8f467264d1adf61e8274e5f864e24018e8..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/GETTING_STARTED.md +++ /dev/null @@ -1,79 +0,0 @@ -## Getting Started with Detectron2 - -This document provides a brief intro of the usage of builtin command-line tools in detectron2. - -For a tutorial that involves actual coding with the API, -see our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) -which covers how to run inference with an -existing model, and how to train a builtin model on a custom dataset. - - -### Inference Demo with Pre-trained Models - -1. Pick a model and its config file from - [model zoo](MODEL_ZOO.md), - for example, `mask_rcnn_R_50_FPN_3x.yaml`. -2. We provide `demo.py` that is able to demo builtin configs. Run it with: -``` -cd demo/ -python demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ - --input input1.jpg input2.jpg \ - [--other-options] - --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl -``` -The configs are made for training, therefore we need to specify `MODEL.WEIGHTS` to a model from model zoo for evaluation. -This command will run the inference and show visualizations in an OpenCV window. - -For details of the command line arguments, see `demo.py -h` or look at its source code -to understand its behavior. Some common arguments are: -* To run __on your webcam__, replace `--input files` with `--webcam`. -* To run __on a video__, replace `--input files` with `--video-input video.mp4`. -* To run __on cpu__, add `MODEL.DEVICE cpu` after `--opts`. -* To save outputs to a directory (for images) or a file (for webcam or video), use `--output`. 
- - -### Training & Evaluation in Command Line - -We provide two scripts in "tools/plain_train_net.py" and "tools/train_net.py", -that are made to train all the configs provided in detectron2. You may want to -use it as a reference to write your own training script. - -Compared to "train_net.py", "plain_train_net.py" supports fewer default -features. It also includes fewer abstraction, therefore is easier to add custom -logic. - -To train a model with "train_net.py", first -setup the corresponding datasets following -[datasets/README.md](./datasets/README.md), -then run: -``` -cd tools/ -./train_net.py --num-gpus 8 \ - --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml -``` - -The configs are made for 8-GPU training. -To train on 1 GPU, you may need to [change some parameters](https://arxiv.org/abs/1706.02677), e.g.: -``` -./train_net.py \ - --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \ - --num-gpus 1 SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025 -``` - -To evaluate a model's performance, use -``` -./train_net.py \ - --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \ - --eval-only MODEL.WEIGHTS /path/to/checkpoint_file -``` -For more options, see `./train_net.py -h`. - -### Use Detectron2 APIs in Your Code - -See our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) -to learn how to use detectron2 APIs to: -1. run inference with an existing model -2. train a builtin model on a custom dataset - -See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/main/projects) -for more ways to build your project on detectron2. diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/engine/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/engine/__init__.py deleted file mode 100644 index 539b93a7beca07d229a6b6d387f885469242ad86..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/engine/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
- -from .trainer import Trainer diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/autoanchor.py b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/autoanchor.py deleted file mode 100644 index 1a4c52141bc68d9cb390a033eda90eddc2f235f7..0000000000000000000000000000000000000000 --- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/autoanchor.py +++ /dev/null @@ -1,170 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -AutoAnchor utils -""" - -import random - -import numpy as np -import torch -import yaml -from tqdm import tqdm - -from utils.general import LOGGER, colorstr, emojis - -PREFIX = colorstr('AutoAnchor: ') - - -def check_anchor_order(m): - # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary - a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer - da = a[-1] - a[0] # delta a - ds = m.stride[-1] - m.stride[0] # delta s - if da and (da.sign() != ds.sign()): # same order - LOGGER.info(f'{PREFIX}Reversing anchor order') - m.anchors[:] = m.anchors.flip(0) - - -def check_anchors(dataset, model, thr=4.0, imgsz=640): - # Check anchor fit to data, recompute if necessary - m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() - shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) - scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale - wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh - - def metric(k): # compute metric - r = wh[:, None] / k[None] - x = torch.min(r, 1 / r).min(2)[0] # ratio metric - best = x.max(1)[0] # best_x - aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold - bpr = (best > 1 / thr).float().mean() # best possible recall - return bpr, aat - - stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides - anchors = m.anchors.clone() * stride # current anchors - bpr, aat = metric(anchors.cpu().view(-1, 2)) - s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). 
' - if bpr > 0.98: # threshold to recompute - LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅')) - else: - LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')) - na = m.anchors.numel() // 2 # number of anchors - try: - anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - except Exception as e: - LOGGER.info(f'{PREFIX}ERROR: {e}') - new_bpr = metric(anchors)[0] - if new_bpr > bpr: # replace anchors - anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) - m.anchors[:] = anchors.clone().view_as(m.anchors) - check_anchor_order(m) # must be in pixel-space (not grid-space) - m.anchors /= stride - s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' - else: - s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' - LOGGER.info(emojis(s)) - - -def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): - """ Creates kmeans-evolved anchors from training dataset - - Arguments: - dataset: path to data.yaml, or a loaded dataset - n: number of anchors - img_size: image size used for training - thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 - gen: generations to evolve anchors using genetic algorithm - verbose: print all results - - Return: - k: kmeans evolved anchors - - Usage: - from utils.autoanchor import *; _ = kmean_anchors() - """ - from scipy.cluster.vq import kmeans - - npr = np.random - thr = 1 / thr - - def metric(k, wh): # compute metrics - r = wh[:, None] / k[None] - x = torch.min(r, 1 / r).min(2)[0] # ratio metric - # x = wh_iou(wh, torch.tensor(k)) # iou metric - return x, x.max(1)[0] # x, best_x - - def anchor_fitness(k): # mutation fitness - _, best = metric(torch.tensor(k, dtype=torch.float32), wh) - return (best * (best > thr).float()).mean() # fitness - - def print_results(k, verbose=True): - k = k[np.argsort(k.prod(1))] # sort small to large - x, best = metric(k, wh0) - bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ - f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ - f'past_thr={x[x > thr].mean():.3f}-mean: ' - for x in k: - s += '%i,%i, ' % (round(x[0]), round(x[1])) - if verbose: - LOGGER.info(s[:-2]) - return k - - if isinstance(dataset, str): # *.yaml file - with open(dataset, errors='ignore') as f: - data_dict = yaml.safe_load(f) # model dict - from utils.dataloaders import LoadImagesAndLabels - dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) - - # Get label wh - shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) - wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh - - # Filter - i = (wh0 < 3.0).any(1).sum() - if i: - LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') - wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels - # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 - - # Kmeans init - try: - LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') - assert n <= len(wh) # apply overdetermined constraint - s = wh.std(0) # sigmas for whitening - k = kmeans(wh / s, n, iter=30)[0] * s # points - 
assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar - except Exception: - LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init') - k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init - wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) - k = print_results(k, verbose=False) - - # Plot - # k, d = [None] * 20, [None] * 20 - # for i in tqdm(range(1, 21)): - # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance - # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) - # ax = ax.ravel() - # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') - # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh - # ax[0].hist(wh[wh[:, 0]<100, 0],400) - # ax[1].hist(wh[wh[:, 1]<100, 1],400) - # fig.savefig('wh.png', dpi=200) - - # Evolve - f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar - for _ in pbar: - v = np.ones(sh) - while (v == 1).all(): # mutate until a change occurs (prevent duplicates) - v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) - kg = (k.copy() * v).clip(min=2.0) - fg = anchor_fitness(kg) - if fg > f: - f, k = fg, kg.copy() - pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' - if verbose: - print_results(k, verbose) - - return print_results(k) diff --git a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/clap/training/distributed.py b/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/clap/training/distributed.py deleted file mode 100644 index 2fa61f76c5cc3ab9f6a9643042afa8e1f2e1cb7f..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/clap/training/distributed.py +++ /dev/null @@ -1,150 +0,0 @@ -import os - -import torch -import socket - -try: - import horovod.torch as hvd -except ImportError: - hvd = None - - -def is_global_master(args): - return args.rank == 0 - - -def is_local_master(args): - return args.local_rank == 0 - - -def is_master(args, local=False): - return is_local_master(args) if local else is_global_master(args) - - -def is_using_horovod(): - # NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set - # Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required... 
- ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"] - pmi_vars = ["PMI_RANK", "PMI_SIZE"] - if all([var in os.environ for var in ompi_vars]) or all( - [var in os.environ for var in pmi_vars] - ): - return True - else: - return False - - -def is_using_distributed(): - if "WORLD_SIZE" in os.environ: - return int(os.environ["WORLD_SIZE"]) > 1 - if "SLURM_NTASKS" in os.environ: - return int(os.environ["SLURM_NTASKS"]) > 1 - return False - - -def world_info_from_env(): - local_rank = 0 - for v in ( - "SLURM_LOCALID", - "MPI_LOCALRANKID", - "OMPI_COMM_WORLD_LOCAL_RANK", - "LOCAL_RANK", - ): - if v in os.environ: - local_rank = int(os.environ[v]) - break - global_rank = 0 - for v in ("SLURM_PROCID", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "RANK"): - if v in os.environ: - global_rank = int(os.environ[v]) - break - world_size = 1 - for v in ("SLURM_NTASKS", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "WORLD_SIZE"): - if v in os.environ: - world_size = int(os.environ[v]) - break - - return local_rank, global_rank, world_size - - -def init_distributed_device(args): - # Distributed training = training on more than one GPU. - # Works in both single and multi-node scenarios. - args.distributed = False - args.world_size = 1 - args.rank = 0 # global rank - args.local_rank = 0 - if args.horovod: - assert hvd is not None, "Horovod is not installed" - hvd.init() - world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"]) - world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"]) - local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]) - args.local_rank = local_rank - args.rank = world_rank - args.world_size = world_size - # args.local_rank = int(hvd.local_rank()) - # args.rank = hvd.rank() - # args.world_size = hvd.size() - args.distributed = True - os.environ["LOCAL_RANK"] = str(args.local_rank) - os.environ["RANK"] = str(args.rank) - os.environ["WORLD_SIZE"] = str(args.world_size) - print( - f"Distributed training: local_rank={args.local_rank}, " - f"rank={args.rank}, world_size={args.world_size}, " - f"hostname={socket.gethostname()}, pid={os.getpid()}" - ) - elif is_using_distributed(): - if "SLURM_PROCID" in os.environ: - # DDP via SLURM - args.local_rank, args.rank, args.world_size = world_info_from_env() - # SLURM var -> torch.distributed vars in case needed - os.environ["LOCAL_RANK"] = str(args.local_rank) - os.environ["RANK"] = str(args.rank) - os.environ["WORLD_SIZE"] = str(args.world_size) - torch.distributed.init_process_group( - backend=args.dist_backend, - init_method=args.dist_url, - world_size=args.world_size, - rank=args.rank, - ) - elif "OMPI_COMM_WORLD_SIZE" in os.environ: # using Summit cluster - world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"]) - world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"]) - local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]) - args.local_rank = local_rank - args.rank = world_rank - args.world_size = world_size - torch.distributed.init_process_group( - backend=args.dist_backend, - init_method=args.dist_url, - world_size=args.world_size, - rank=args.rank, - ) - else: - # DDP via torchrun, torch.distributed.launch - args.local_rank, _, _ = world_info_from_env() - torch.distributed.init_process_group( - backend=args.dist_backend, init_method=args.dist_url - ) - args.world_size = torch.distributed.get_world_size() - args.rank = torch.distributed.get_rank() - args.distributed = True - print( - f"Distributed training: local_rank={args.local_rank}, " - f"rank={args.rank}, world_size={args.world_size}, " - f"hostname={socket.gethostname()}, pid={os.getpid()}" - ) - 
- if torch.cuda.is_available(): - if args.distributed and not args.no_set_device_rank: - device = "cuda:%d" % args.local_rank - else: - device = "cuda:0" - torch.cuda.set_device(device) - else: - device = "cpu" - args.device = device - device = torch.device(device) - return device diff --git a/spaces/candlend/vits-hoshimi/vits/vits_inferencer.py b/spaces/candlend/vits-hoshimi/vits/vits_inferencer.py deleted file mode 100644 index 2d24aa81720acb6af148a8defb0c3338419f70bf..0000000000000000000000000000000000000000 --- a/spaces/candlend/vits-hoshimi/vits/vits_inferencer.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import json -import math -import torch -from torch import nn -from torch.nn import functional as F -from torch.utils.data import DataLoader -from vits import VITS_ROOT_PATH - -from vits import commons -from vits import utils -from vits.models import SynthesizerTrn -from vits.text.symbols import symbols -from vits.text import text_to_sequence -import gradio as gr - -mode_dict = { - "普通声线": "normal", - "营业声线": "formal" -} - -default_mode = "普通声线" -default_noise_scale = 0.667 -default_noise_scale_w = 0.8 -default_length_scale = 1 - -replace_list = [ - ("candle", "刊豆"), - ("end", "按的"), - ("hoshimi", "吼西咪"), - ("mua", "木啊"), - ("hsm", "吼西咪"), - ("ho", "齁"), - ("na", "呐"), - ("shi", "西"), - ("mi", "咪"), -] - -def get_text(text, hps): - text = preprocess_text(text) - text_norm = text_to_sequence(text, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - -def preprocess_text(text): - text = text.lower() - for src, dst in replace_list: - text = text.replace(src, dst) - return text - -class VitsInferencer: - def __init__(self, hps_path, device="cpu"): - print("init") - self.device = torch.device(device) - self.hps = utils.get_hparams_from_file(hps_path) - self.model_paths = {} - self.models = {} - for mode in mode_dict: - self.model_paths[mode] = self.get_latest_model_path_by_mode(mode) - self.load_models() - - def get_latest_model_path_by_mode(self, mode): - model_dir_path = os.path.join(VITS_ROOT_PATH, "models", mode_dict[mode]) - return utils.latest_checkpoint_path(model_dir_path, "G_*.pth") - - def infer(self, text, mode, noise_scale=.667, noise_scale_w=0.8, length_scale=1): - stn_tst = get_text(text, self.hps) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0).to(self.device) - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(self.device) - audio = self.models[mode].infer(x_tst, x_tst_lengths, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.float().cpu().numpy() - return (self.hps.data.sampling_rate, audio) - - def change_mode(self, mode): - self.select_mode(mode) - return gr.update(choices=self.models, value=os.path.basename(self.latest_model_path)) - - def render(self): - choice_mode = gr.Radio(choices=["普通声线", "营业声线"], label="声线选择", value=default_mode) - noise_scale = gr.Slider(minimum=0, maximum=3, value=default_noise_scale, step=0.001, label="noise_scale(效果不可控,谨慎修改)") - noise_scale_w = gr.Slider(minimum=0, maximum=3, value=default_noise_scale_w, step=0.001, label="noise_scale_w(效果不可控,谨慎修改)") - length_scale = gr.Slider(minimum=0, maximum=3, value=default_length_scale, step=0.001, label="length_scale(数值越大输出音频越长)") - - tts_input = gr.TextArea( - label="请输入文本(目前只支持汉字、单个英文字母和极个别专有名词,可以使用常用符号和空格来改变语调和停顿,请勿一次性输入过长文本)", - value="这里是爱喝奶茶,穿得也像奶茶魅力点是普通话二乙的星弥Hoshimi,晚上Ho") - tts_submit = gr.Button("合成", variant="primary") - 
tts_output = gr.Audio(label="Output") - gr.HTML(''' -
        -
Copyright Notice
        -
The copyright of the dataset and models in this project belongs to 星弥Hoshimi
        -
For study and exchange purposes only; any commercial or illegal use is prohibited, and you assume all resulting liability
        -
        - ''') - tts_submit.click(self.infer, [tts_input, choice_mode, noise_scale, noise_scale_w, length_scale], [tts_output], api_name=f"infer") - - def load_models(self): - for key, model_path in self.model_paths.items(): - self.models[key] = SynthesizerTrn( - len(symbols), - self.hps.data.filter_length // 2 + 1, - self.hps.train.segment_size // self.hps.data.hop_length, - **self.hps.model).to(self.device) - _ = self.models[key].eval() - _ = utils.load_checkpoint(model_path, self.models[key], None) \ No newline at end of file diff --git a/spaces/canturan10/satellighte/app.py b/spaces/canturan10/satellighte/app.py deleted file mode 100644 index 068a6490edc88c55bf785137c842c378c07b2b4f..0000000000000000000000000000000000000000 --- a/spaces/canturan10/satellighte/app.py +++ /dev/null @@ -1,100 +0,0 @@ -import random -from datetime import datetime - -import numpy as np -import requests -import satellighte as sat -import streamlit as st -from PIL import Image - - -def main(): - # pylint: disable=no-member - - st.set_page_config( - page_title="Satellighte Demo Page", - page_icon="📡", - layout="centered", - initial_sidebar_state="expanded", - menu_items={ - "Get Help": "https://canturan10.github.io/satellighte/", - "About": "Satellite Image Classification", - }, - ) - - st.title("Satellighte Demo Page") - - url = "https://raw.githubusercontent.com/canturan10/satellighte/master/src/satellighte.png?raw=true" - satellighte = Image.open(requests.get(url, stream=True).raw) - - st.sidebar.image(satellighte, width=100) - st.sidebar.title("Satellighte") - st.sidebar.caption(sat.__description__) - - st.sidebar.write( - "**Satellighte** is an image classification library that consist state-of-the-art deep learning methods. It is a combination of the words **'Satellite'** and **'Light'**, and its purpose is to establish a light structure to classify satellite images, but to obtain robust results." - ) - - st.sidebar.caption(f"Version: `{sat.__version__}`") - st.sidebar.caption(f"License: `{sat.__license__}`") - st.sidebar.caption("") - st.sidebar.caption(f"[Website](https://canturan10.github.io/satellighte/)") - st.sidebar.caption(f"[Docs](https://satellighte.readthedocs.io/)") - st.sidebar.caption(f"[Github](https://github.com/canturan10/satellighte)") - st.sidebar.caption(f"[Demo Page](https://canturan10-satellighte-streamlit-app-6lr5ve.streamlitapp.com/)") - #st.sidebar.caption(f"[Hugging Face](https://huggingface.co/spaces/canturan10/satellighte)") - st.sidebar.caption(f"[Pypi](https://pypi.org/project/satellighte/)") - st.sidebar.caption("") - st.sidebar.caption(sat.__copyright__) - - selected_model = st.selectbox( - "Select model", - sat.available_models(), - ) - selected_version = st.selectbox( - "Select version", - sat.get_model_versions(selected_model), - ) - - model = sat.Classifier.from_pretrained(selected_model, selected_version) - model.eval() - - uploaded_file = st.file_uploader( - "", type=["png", "jpg", "jpeg"], accept_multiple_files=False - ) - - if uploaded_file is None: - st.write("Sample Image") - # Sample image. - url = f"https://raw.githubusercontent.com/canturan10/satellighte/master/src/eurosat_samples/{random_sample}?raw=true" - image = Image.open(requests.get(url, stream=True).raw) - - else: - # User-selected image. 
- image = Image.open(uploaded_file) - - image = np.array(image.convert("RGB")) - FRAME_WINDOW = st.image([], use_column_width=True) - - model = sat.Classifier.from_pretrained(selected_model, selected_version) - model.eval() - - results = model.predict(image) - pil_img = sat.utils.visualize(image, results) - - st.write("Results:", results) - FRAME_WINDOW.image(pil_img) - - -if __name__ == "__main__": - samples = [ - "AnnualCrop.jpg", - "Forest.jpg", - "HerbaceousVegetation.jpg", - "PermanentCrop.jpg", - "River.jpg", - ] - random.seed(datetime.now()) - random_sample = samples[random.randint(0, len(samples) - 1)] - - main() diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/tests/test_frame_selector.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/tests/test_frame_selector.py deleted file mode 100644 index 65f05f55c78d4ab24950e5335818b3e1f981aa0d..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/tests/test_frame_selector.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import random -import unittest - -from densepose.data.video import FirstKFramesSelector, LastKFramesSelector, RandomKFramesSelector - - -class TestFrameSelector(unittest.TestCase): - def test_frame_selector_random_k_1(self): - _SEED = 43 - _K = 4 - random.seed(_SEED) - selector = RandomKFramesSelector(_K) - frame_tss = list(range(0, 20, 2)) - _SELECTED_GT = [0, 8, 4, 6] - selected = selector(frame_tss) - self.assertEqual(_SELECTED_GT, selected) - - def test_frame_selector_random_k_2(self): - _SEED = 43 - _K = 10 - random.seed(_SEED) - selector = RandomKFramesSelector(_K) - frame_tss = list(range(0, 6, 2)) - _SELECTED_GT = [0, 2, 4] - selected = selector(frame_tss) - self.assertEqual(_SELECTED_GT, selected) - - def test_frame_selector_first_k_1(self): - _K = 4 - selector = FirstKFramesSelector(_K) - frame_tss = list(range(0, 20, 2)) - _SELECTED_GT = frame_tss[:_K] - selected = selector(frame_tss) - self.assertEqual(_SELECTED_GT, selected) - - def test_frame_selector_first_k_2(self): - _K = 10 - selector = FirstKFramesSelector(_K) - frame_tss = list(range(0, 6, 2)) - _SELECTED_GT = frame_tss[:_K] - selected = selector(frame_tss) - self.assertEqual(_SELECTED_GT, selected) - - def test_frame_selector_last_k_1(self): - _K = 4 - selector = LastKFramesSelector(_K) - frame_tss = list(range(0, 20, 2)) - _SELECTED_GT = frame_tss[-_K:] - selected = selector(frame_tss) - self.assertEqual(_SELECTED_GT, selected) - - def test_frame_selector_last_k_2(self): - _K = 10 - selector = LastKFramesSelector(_K) - frame_tss = list(range(0, 6, 2)) - _SELECTED_GT = frame_tss[-_K:] - selected = selector(frame_tss) - self.assertEqual(_SELECTED_GT, selected) diff --git a/spaces/ccds/vits_onnx/export/README.md b/spaces/ccds/vits_onnx/export/README.md deleted file mode 100644 index eacede8ee037ddd090a58086173e62d169fd5e7d..0000000000000000000000000000000000000000 --- a/spaces/ccds/vits_onnx/export/README.md +++ /dev/null @@ -1,8 +0,0 @@ -> Thanks a lot to [wetts](https://github.com/wenet-e2e/wetts) -> 欢迎 pr -## 修改说明 -1. 将原仓库的配置文件修改成[@CjangCjengh](https://github.com/CjangCjengh)用的部署文件 详细参考config.json -2. 为导出代码添加注释, tensor修改为np.array -3. 
有问题请认真阅读源码 - - diff --git a/spaces/celise88/Pathfinder/templates/login.html b/spaces/celise88/Pathfinder/templates/login.html deleted file mode 100644 index 98b63fcf5d9712c36bd0cf4f1954dfb06611573a..0000000000000000000000000000000000000000 --- a/spaces/celise88/Pathfinder/templates/login.html +++ /dev/null @@ -1,58 +0,0 @@ - - - - - - - Dashboard - - - - -
        -

        User Login

        -

        Welcome to Pathfinder!

        - {% if message %} -

        {{ message }}

        - {% else %} -

        Enter your username and password below to get started

        - {% endif %} -
        - -
        -
        -
        -
        -
        -
        -
        - -
        - - - diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/wav2vec2/finetune_large_xlsr_53_arabic_speech_corpus.sh b/spaces/chendl/compositional_test/transformers/examples/research_projects/wav2vec2/finetune_large_xlsr_53_arabic_speech_corpus.sh deleted file mode 100644 index 9b325c42771e64d510830788516da731b5be3009..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/wav2vec2/finetune_large_xlsr_53_arabic_speech_corpus.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -python run_asr.py \ ---output_dir="./wav2vec2-large-xlsr-53-arabic-speech-corpus" \ ---num_train_epochs="50" \ ---per_device_train_batch_size="1" \ ---per_device_eval_batch_size="1" \ ---gradient_accumulation_steps="8" \ ---evaluation_strategy="steps" \ ---save_steps="500" \ ---eval_steps="100" \ ---logging_steps="50" \ ---learning_rate="5e-4" \ ---warmup_steps="3000" \ ---model_name_or_path="elgeish/wav2vec2-large-xlsr-53-arabic" \ ---fp16 \ ---dataset_name="arabic_speech_corpus" \ ---train_split_name="train" \ ---validation_split_name="test" \ ---max_duration_in_seconds="15" \ ---orthography="buckwalter" \ ---preprocessing_num_workers="$(nproc)" \ ---group_by_length \ ---freeze_feature_extractor \ ---target_feature_extractor_sampling_rate \ ---verbose_logging \ diff --git a/spaces/christhegamechanger/background_swapping/metrics.py b/spaces/christhegamechanger/background_swapping/metrics.py deleted file mode 100644 index b3e5d3d3557a15661934e1c109c9baa2d7e57503..0000000000000000000000000000000000000000 --- a/spaces/christhegamechanger/background_swapping/metrics.py +++ /dev/null @@ -1,25 +0,0 @@ -from libs import * - -smooth = 1e-15 - - -def iou(y_true, y_pred): - def f(y_true, y_pred): - intersection = (y_true * y_pred).sum() - union = y_true.sum() + y_pred.sum() - intersection - x = (intersection + 1e-15) / (union + 1e-15) - x = x.astype(np.float32) - return x - return tf.numpy_function(f, [y_true, y_pred], tf.float32) - - - -def dice_coef(y_true, y_pred): - y_true = tf.keras.layers.Flatten()(y_true) - y_pred = tf.keras.layers.Flatten()(y_pred) - intersection = tf.reduce_sum(y_true * y_pred) - return (2. * intersection + smooth) / (tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) + smooth) - - -def dice_loss(y_true, y_pred): - return 1.0 - dice_coef(y_true, y_pred) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/_binary.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/_binary.py deleted file mode 100644 index a74ee9eb6f341aca9e074c0acc4b306a354175a0..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/_binary.py +++ /dev/null @@ -1,102 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# Binary input/output support routines. -# -# Copyright (c) 1997-2003 by Secret Labs AB -# Copyright (c) 1995-2003 by Fredrik Lundh -# Copyright (c) 2012 by Brian Crowell -# -# See the README file for information on usage and redistribution. -# - - -"""Binary input/output support routines.""" - - -from struct import pack, unpack_from - - -def i8(c): - return c if c.__class__ is int else c[0] - - -def o8(i): - return bytes((i & 255,)) - - -# Input, le = little endian, be = big endian -def i16le(c, o=0): - """ - Converts a 2-bytes (16 bits) string to an unsigned integer. 
-
-    :param c: string containing bytes to convert
-    :param o: offset of bytes to convert in string
-    """
-    return unpack_from("<H", c, o)[0]
-
-
-def si16le(c, o=0):
-    """
-    Converts a 2-bytes (16 bits) string to a signed integer.
-
-    :param c: string containing bytes to convert
-    :param o: offset of bytes to convert in string
-    """
-    return unpack_from("<h", c, o)[0]
-
-
-def si16be(c, o=0):
-    """
-    Converts a 2-bytes (16 bits) string to a signed integer, big endian.
-
-    :param c: string containing bytes to convert
-    :param o: offset of bytes to convert in string
-    """
-    return unpack_from(">h", c, o)[0]
-
-
-def i32le(c, o=0):
-    """
-    Converts a 4-bytes (32 bits) string to an unsigned integer.
-
-    :param c: string containing bytes to convert
-    :param o: offset of bytes to convert in string
-    """
-    return unpack_from("<I", c, o)[0]
-
-
-def si32le(c, o=0):
-    """
-    Converts a 4-bytes (32 bits) string to a signed integer.
-
-    :param c: string containing bytes to convert
-    :param o: offset of bytes to convert in string
-    """
-    return unpack_from("<i", c, o)[0]
-
-
-def i16be(c, o=0):
-    return unpack_from(">H", c, o)[0]
-
-
-def i32be(c, o=0):
-    return unpack_from(">I", c, o)[0]
-
-
-# Output, le = little endian, be = big endian
-def o16le(i):
-    return pack("<H", i)
-
-
-def o32le(i):
-    return pack("<I", i)
-
-
-def o16be(i):
-    return pack(">H", i)
-
-
-def o32be(i):
-    return pack(">I", i) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/__init__.py deleted file mode 100644 index 59756c021e97d2384d3d26c7b72c68c073afd4bc..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -# encoding: utf-8
-
-from docx.api import Document  # noqa
-
-__version__ = "0.8.11"
-
-
-# register custom Part classes with opc package reader
-
-from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
-from docx.opc.part import PartFactory
-from docx.opc.parts.coreprops import CorePropertiesPart
-
-from docx.parts.document import DocumentPart
-from docx.parts.hdrftr import FooterPart, HeaderPart
-from docx.parts.image import ImagePart
-from docx.parts.numbering import NumberingPart
-from docx.parts.settings import SettingsPart
-from docx.parts.styles import StylesPart
-
-
-def part_class_selector(content_type, reltype):
-    if reltype == RT.IMAGE:
-        return ImagePart
-    return None
-
-
-PartFactory.part_class_selector = part_class_selector
-PartFactory.part_type_for[CT.OPC_CORE_PROPERTIES] = CorePropertiesPart
-PartFactory.part_type_for[CT.WML_DOCUMENT_MAIN] = DocumentPart
-PartFactory.part_type_for[CT.WML_FOOTER] = FooterPart
-PartFactory.part_type_for[CT.WML_HEADER] = HeaderPart
-PartFactory.part_type_for[CT.WML_NUMBERING] = NumberingPart
-PartFactory.part_type_for[CT.WML_SETTINGS] = SettingsPart
-PartFactory.part_type_for[CT.WML_STYLES] = StylesPart
-
-del (
-    CT,
-    CorePropertiesPart,
-    DocumentPart,
-    FooterPart,
-    HeaderPart,
-    NumberingPart,
-    PartFactory,
-    SettingsPart,
-    StylesPart,
-    part_class_selector,
-) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_f_p_g_m.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_f_p_g_m.py deleted file mode 100644 index df23041d65617af9c1f6feb00db970b7870c2268..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_f_p_g_m.py +++ /dev/null @@ -1,49 +0,0 @@ -from . import DefaultTable
-from . 
import ttProgram - - -class table__f_p_g_m(DefaultTable.DefaultTable): - def decompile(self, data, ttFont): - program = ttProgram.Program() - program.fromBytecode(data) - self.program = program - - def compile(self, ttFont): - return self.program.getBytecode() - - def toXML(self, writer, ttFont): - self.program.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - program = ttProgram.Program() - program.fromXML(name, attrs, content, ttFont) - self.program = program - - def __bool__(self): - """ - >>> fpgm = table__f_p_g_m() - >>> bool(fpgm) - False - >>> p = ttProgram.Program() - >>> fpgm.program = p - >>> bool(fpgm) - False - >>> bc = bytearray([0]) - >>> p.fromBytecode(bc) - >>> bool(fpgm) - True - >>> p.bytecode.pop() - 0 - >>> bool(fpgm) - False - """ - return hasattr(self, "program") and bool(self.program) - - __nonzero__ = __bool__ - - -if __name__ == "__main__": - import sys - import doctest - - sys.exit(doctest.testmod().failed) diff --git a/spaces/cihyFjudo/fairness-paper-search/3 Bachelors 1080p blu-ray movie download The ultimate guide to finding the best links.md b/spaces/cihyFjudo/fairness-paper-search/3 Bachelors 1080p blu-ray movie download The ultimate guide to finding the best links.md deleted file mode 100644 index 6acf57ebf4a1db2d5dbab17a394f1793cf0c9440..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/3 Bachelors 1080p blu-ray movie download The ultimate guide to finding the best links.md +++ /dev/null @@ -1,6 +0,0 @@ -

        3 Bachelors 1080p blu-ray movie download


DOWNLOAD https://tinurli.com/2uwkNh



        - - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/cihyFjudo/fairness-paper-search/Claudia Cepeda Story Of O The Series No 5 Otkrovenie Spain 199217.md b/spaces/cihyFjudo/fairness-paper-search/Claudia Cepeda Story Of O The Series No 5 Otkrovenie Spain 199217.md deleted file mode 100644 index 4a4748603ac148e2c8382dfacabcf454a5b268e0..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Claudia Cepeda Story Of O The Series No 5 Otkrovenie Spain 199217.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Claudia Cepeda Story Of O The Series No 5 Otkrovenie Spain 199217


        Download ✯✯✯ https://tinurli.com/2uwisw



        -
        - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/cihyFjudo/fairness-paper-search/Memin Pinguin 51 100 Pdf Autorennspiele Gitar.md b/spaces/cihyFjudo/fairness-paper-search/Memin Pinguin 51 100 Pdf Autorennspiele Gitar.md deleted file mode 100644 index d0090f296ed3f5e174ca43637420de5a4b3ee52c..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Memin Pinguin 51 100 Pdf Autorennspiele Gitar.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Memin Pinguin 51 100 Pdf autorennspiele gitar


Download File https://tinurli.com/2uwisU



        - - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/cihyFjudo/fairness-paper-search/Sotto Voce Movie In Italian Free !EXCLUSIVE! Download.md b/spaces/cihyFjudo/fairness-paper-search/Sotto Voce Movie In Italian Free !EXCLUSIVE! Download.md deleted file mode 100644 index 7367ae5d95f154d95db1cddf5c6d7172c235bc50..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Sotto Voce Movie In Italian Free !EXCLUSIVE! Download.md +++ /dev/null @@ -1,5 +0,0 @@ -
        -

        fff - fortississimo - extremely loud
        ff - fortissimo - very loud
        f - forte - loud
        mf - mezzoforte - medium loud
        mp - mezzopiano - medium quiet
        p - piano - quiet
        pp - pianissimo - very quiet
        ppp - pianississimo - extremely quiet
        sotto voce - whispered
subito - suddenly (e.g. subito forte)
        fp - fortepiano - loud then immediately soft
        sfz - sforzando - sudden accent
        sf - sforzato - sudden accent
        fz - forzando, forzato - sudden accent
        sfzp, sfp, fzp - sudden accent followed immediately by piano
        rfz, rf - rinforzando - several notes are to be emphasised
        cresc. - crescendo - getting gradually louder
dim. - diminuendo - decrescendo - getting gradually softer
        molto - much (e.g. cresc molto - get gradually much louder)
        poco - little (dim. poco a poco - get quieter little by little)
        al niente - to nothing
        dal niente - from nothing
        morendo - dying away (also to do with tempo)
        smorzando - becoming muffled

        -

        Sotto Voce movie in italian free download


Download File https://tinurli.com/2uwjbR



        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Turbo C Free Download For Windows 7 32 Bit 161 UPDATED.md b/spaces/cihyFjudo/fairness-paper-search/Turbo C Free Download For Windows 7 32 Bit 161 UPDATED.md deleted file mode 100644 index baf0dc7e3c562ad5763acb365b5a39eca471de01..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Turbo C Free Download For Windows 7 32 Bit 161 UPDATED.md +++ /dev/null @@ -1,6 +0,0 @@ -
        -

Freeware programs can be downloaded and used free of charge and without any time limitations. Freeware products can be used free of charge for both personal and professional (commercial) use.

        -

This license is commonly used for video games and it allows users to download and play the game for free. Basically, a product is offered Free to Play (Freemium) and the user can decide whether to pay (Premium) for additional features, services, or virtual or physical goods that expand the functionality of the game. In some cases, ads may be shown to the users.

        -

        turbo c free download for windows 7 32 bit 161


        Download Zip > https://tinurli.com/2uwk0s



        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/cjayic/sovits-overwatch2/app.py b/spaces/cjayic/sovits-overwatch2/app.py deleted file mode 100644 index 828438c43ead19d735e0301552bde83028713dc5..0000000000000000000000000000000000000000 --- a/spaces/cjayic/sovits-overwatch2/app.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import json -import math -import torch -import torchaudio -from torch import nn -from torch.nn import functional as F -from torch.utils.data import DataLoader - -import commons -import utils -from data_utils import UnitAudioLoader, UnitAudioCollate -from models import SynthesizerTrn - -import gradio - -hubert = torch.hub.load("bshall/hubert:main", "hubert_soft") - -hps = utils.get_hparams_from_file("configs/sovits_ow2.json") - -net_g = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) -_ = net_g.eval() - -_ = utils.load_checkpoint("logs/ow2/G_195000.pth", net_g, None) - - -def infer(md, mic_audio, audio, speaker_id, pitch_shift, length_scale, noise_scale=.667, noise_scale_w=0.8): - - source = None - sr = None - - if mic_audio: - sr, source = mic_audio - source = torch.Tensor(source) - - if source.dim() == 1: - source = source.unsqueeze(1) - - source = source.T - - if audio: - source, sr = torchaudio.load(audio) - - source = torchaudio.functional.pitch_shift(source, sr, int(pitch_shift))#, n_fft=256) - source = torchaudio.functional.resample(source, sr, 16000) - source = torch.mean(source, dim=0).unsqueeze(0) - source = source.unsqueeze(0) - - with torch.inference_mode(): - # Extract speech units - unit = hubert.units(source) - unit_lengths = torch.LongTensor([unit.size(1)]) - - # for multi-speaker inference - sid = torch.LongTensor([speaker_id]) - - # Synthesize audio - audio_out = net_g.infer(unit, unit_lengths, sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.float().numpy() - - return (22050, audio_out) - -demo = gradio.Interface( - fn=infer, - inputs=[ - gradio.Markdown( - """ - # SOVITS Any-to-Many VC | Overwatch 2 - Upload any voice recording and turn it into a mangled approximation of any* Overwatch 2 Hero! - - For a higher quality single-speaker model, check out my [soft-vc-widowmaker](https://huggingface.co/spaces/cjayic/soft-vc-widowmaker) space! - - SOVITS doesn't really appear to adjust the pitch to the target speaker, so it helps to have your input voice at a similar pitch to the target voice. - I added a pitch shift option to preprocess the input voice, but it's slow and sometimes outright broken, use at your own risk. - - ( * up to Kiriko and without Bastion. Please forgive. 
) - """), - gradio.Audio(label="Record Input Audio", source="microphone"), - gradio.Audio(label="Upload Input Audio", type="filepath"), - gradio.Dropdown(label="Target Voice", choices=["Ana", "Ashe", "Baptiste", "Brigitte", "Cassidy", "Doomfist", "D.Va", "Echo", "Genji", "Hanzo", "Junker Queen", "Junkrat", "Kiriko", "Lúcio", "Mei", "Mercy", "Moira", "Orisa", "Pharah", "Reaper", "Reinhardt", "Roadhog", "Sigma", "Sojourn", "Soldier_ 76", "Sombra", "Symmetra", "Torbjörn", "Tracer", "Widowmaker", "Winston", "Zarya", "Zenyatta"], type="index", value="Ana"), - gradio.Slider(label="Pitch Shift Input (+12 = up one octave, ⚠️ broken AF ⚠️)", minimum=-12.0, maximum=12.0, value=0, step=1), - gradio.Slider(label="Length Factor (higher = slower speech)", minimum=0.1, maximum=2.0, value=1.0), - gradio.Slider(label="Noise Scale (higher = more expressive and erratic)", minimum=0.0, maximum=2.0, value=.667), - gradio.Slider(label="Noise Scale W (higher = more variation in cadence)", minimum=0.0, maximum=2.0, value=.8) - ], - outputs=[gradio.Audio(label="Audio as Target Voice")], -) -#demo.launch(share=True) -demo.launch(server_name="0.0.0.0") diff --git a/spaces/cleanmaster/so-vits-svc-akagi/attentions.py b/spaces/cleanmaster/so-vits-svc-akagi/attentions.py deleted file mode 100644 index 4e0b0c1fd48c962e21e1fbe60b23fc574927435c..0000000000000000000000000000000000000000 --- a/spaces/cleanmaster/so-vits-svc-akagi/attentions.py +++ /dev/null @@ -1,303 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = 
nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / 
math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/parser/isoparser.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/parser/isoparser.py deleted file mode 100644 index 5d7bee38006d4e510b841d84df0322dee024b77c..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/parser/isoparser.py +++ /dev/null @@ -1,416 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a parser for ISO-8601 strings - -It is intended to support all valid date, time and datetime formats per the -ISO-8601 specification. 
- -..versionadded:: 2.7.0 -""" -from datetime import datetime, timedelta, time, date -import calendar -from dateutil import tz - -from functools import wraps - -import re -import six - -__all__ = ["isoparse", "isoparser"] - - -def _takes_ascii(f): - @wraps(f) - def func(self, str_in, *args, **kwargs): - # If it's a stream, read the whole thing - str_in = getattr(str_in, 'read', lambda: str_in)() - - # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII - if isinstance(str_in, six.text_type): - # ASCII is the same in UTF-8 - try: - str_in = str_in.encode('ascii') - except UnicodeEncodeError as e: - msg = 'ISO-8601 strings should contain only ASCII characters' - six.raise_from(ValueError(msg), e) - - return f(self, str_in, *args, **kwargs) - - return func - - -class isoparser(object): - def __init__(self, sep=None): - """ - :param sep: - A single character that separates date and time portions. If - ``None``, the parser will accept any single character. - For strict ISO-8601 adherence, pass ``'T'``. - """ - if sep is not None: - if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): - raise ValueError('Separator must be a single, non-numeric ' + - 'ASCII character') - - sep = sep.encode('ascii') - - self._sep = sep - - @_takes_ascii - def isoparse(self, dt_str): - """ - Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. - - An ISO-8601 datetime string consists of a date portion, followed - optionally by a time portion - the date and time portions are separated - by a single character separator, which is ``T`` in the official - standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be - combined with a time portion. - - Supported date formats are: - - Common: - - - ``YYYY`` - - ``YYYY-MM`` or ``YYYYMM`` - - ``YYYY-MM-DD`` or ``YYYYMMDD`` - - Uncommon: - - - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) - - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day - - The ISO week and day numbering follows the same logic as - :func:`datetime.date.isocalendar`. - - Supported time formats are: - - - ``hh`` - - ``hh:mm`` or ``hhmm`` - - ``hh:mm:ss`` or ``hhmmss`` - - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits) - - Midnight is a special case for `hh`, as the standard supports both - 00:00 and 24:00 as a representation. The decimal separator can be - either a dot or a comma. - - - .. caution:: - - Support for fractional components other than seconds is part of the - ISO-8601 standard, but is not currently implemented in this parser. - - Supported time zone offset formats are: - - - `Z` (UTC) - - `±HH:MM` - - `±HHMM` - - `±HH` - - Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, - with the exception of UTC, which will be represented as - :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such - as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. - - :param dt_str: - A string or stream containing only an ISO-8601 datetime string - - :return: - Returns a :class:`datetime.datetime` representing the string. - Unspecified components default to their lowest value. - - .. warning:: - - As of version 2.7.0, the strictness of the parser should not be - considered a stable part of the contract. Any valid ISO-8601 string - that parses correctly with the default settings will continue to - parse correctly in future versions, but invalid strings that - currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not - guaranteed to continue failing in future versions if they encode - a valid date. - - .. 
versionadded:: 2.7.0 - """ - components, pos = self._parse_isodate(dt_str) - - if len(dt_str) > pos: - if self._sep is None or dt_str[pos:pos + 1] == self._sep: - components += self._parse_isotime(dt_str[pos + 1:]) - else: - raise ValueError('String contains unknown ISO components') - - if len(components) > 3 and components[3] == 24: - components[3] = 0 - return datetime(*components) + timedelta(days=1) - - return datetime(*components) - - @_takes_ascii - def parse_isodate(self, datestr): - """ - Parse the date portion of an ISO string. - - :param datestr: - The string portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.date` object - """ - components, pos = self._parse_isodate(datestr) - if pos < len(datestr): - raise ValueError('String contains unknown ISO ' + - 'components: {!r}'.format(datestr.decode('ascii'))) - return date(*components) - - @_takes_ascii - def parse_isotime(self, timestr): - """ - Parse the time portion of an ISO string. - - :param timestr: - The time portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.time` object - """ - components = self._parse_isotime(timestr) - if components[0] == 24: - components[0] = 0 - return time(*components) - - @_takes_ascii - def parse_tzstr(self, tzstr, zero_as_utc=True): - """ - Parse a valid ISO time zone string. - - See :func:`isoparser.isoparse` for details on supported formats. - - :param tzstr: - A string representing an ISO time zone offset - - :param zero_as_utc: - Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones - - :return: - Returns :class:`dateutil.tz.tzoffset` for offsets and - :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is - specified) offsets equivalent to UTC. - """ - return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) - - # Constants - _DATE_SEP = b'-' - _TIME_SEP = b':' - _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)') - - def _parse_isodate(self, dt_str): - try: - return self._parse_isodate_common(dt_str) - except ValueError: - return self._parse_isodate_uncommon(dt_str) - - def _parse_isodate_common(self, dt_str): - len_str = len(dt_str) - components = [1, 1, 1] - - if len_str < 4: - raise ValueError('ISO string too short') - - # Year - components[0] = int(dt_str[0:4]) - pos = 4 - if pos >= len_str: - return components, pos - - has_sep = dt_str[pos:pos + 1] == self._DATE_SEP - if has_sep: - pos += 1 - - # Month - if len_str - pos < 2: - raise ValueError('Invalid common month') - - components[1] = int(dt_str[pos:pos + 2]) - pos += 2 - - if pos >= len_str: - if has_sep: - return components, pos - else: - raise ValueError('Invalid ISO format') - - if has_sep: - if dt_str[pos:pos + 1] != self._DATE_SEP: - raise ValueError('Invalid separator in ISO string') - pos += 1 - - # Day - if len_str - pos < 2: - raise ValueError('Invalid common day') - components[2] = int(dt_str[pos:pos + 2]) - return components, pos + 2 - - def _parse_isodate_uncommon(self, dt_str): - if len(dt_str) < 4: - raise ValueError('ISO string too short') - - # All ISO formats start with the year - year = int(dt_str[0:4]) - - has_sep = dt_str[4:5] == self._DATE_SEP - - pos = 4 + has_sep # Skip '-' if it's there - if dt_str[pos:pos + 1] == b'W': - # YYYY-?Www-?D? 
- pos += 1 - weekno = int(dt_str[pos:pos + 2]) - pos += 2 - - dayno = 1 - if len(dt_str) > pos: - if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: - raise ValueError('Inconsistent use of dash separator') - - pos += has_sep - - dayno = int(dt_str[pos:pos + 1]) - pos += 1 - - base_date = self._calculate_weekdate(year, weekno, dayno) - else: - # YYYYDDD or YYYY-DDD - if len(dt_str) - pos < 3: - raise ValueError('Invalid ordinal day') - - ordinal_day = int(dt_str[pos:pos + 3]) - pos += 3 - - if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): - raise ValueError('Invalid ordinal day' + - ' {} for year {}'.format(ordinal_day, year)) - - base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) - - components = [base_date.year, base_date.month, base_date.day] - return components, pos - - def _calculate_weekdate(self, year, week, day): - """ - Calculate the day of corresponding to the ISO year-week-day calendar. - - This function is effectively the inverse of - :func:`datetime.date.isocalendar`. - - :param year: - The year in the ISO calendar - - :param week: - The week in the ISO calendar - range is [1, 53] - - :param day: - The day in the ISO calendar - range is [1 (MON), 7 (SUN)] - - :return: - Returns a :class:`datetime.date` - """ - if not 0 < week < 54: - raise ValueError('Invalid week: {}'.format(week)) - - if not 0 < day < 8: # Range is 1-7 - raise ValueError('Invalid weekday: {}'.format(day)) - - # Get week 1 for the specific year: - jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it - week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) - - # Now add the specific number of weeks and days to get what we want - week_offset = (week - 1) * 7 + (day - 1) - return week_1 + timedelta(days=week_offset) - - def _parse_isotime(self, timestr): - len_str = len(timestr) - components = [0, 0, 0, 0, None] - pos = 0 - comp = -1 - - if len_str < 2: - raise ValueError('ISO time too short') - - has_sep = False - - while pos < len_str and comp < 5: - comp += 1 - - if timestr[pos:pos + 1] in b'-+Zz': - # Detect time zone boundary - components[-1] = self._parse_tzstr(timestr[pos:]) - pos = len_str - break - - if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP: - has_sep = True - pos += 1 - elif comp == 2 and has_sep: - if timestr[pos:pos+1] != self._TIME_SEP: - raise ValueError('Inconsistent use of colon separator') - pos += 1 - - if comp < 3: - # Hour, minute, second - components[comp] = int(timestr[pos:pos + 2]) - pos += 2 - - if comp == 3: - # Fraction of a second - frac = self._FRACTION_REGEX.match(timestr[pos:]) - if not frac: - continue - - us_str = frac.group(1)[:6] # Truncate to microseconds - components[comp] = int(us_str) * 10**(6 - len(us_str)) - pos += len(frac.group()) - - if pos < len_str: - raise ValueError('Unused components in ISO string') - - if components[0] == 24: - # Standard supports 00:00 and 24:00 as representations of midnight - if any(component != 0 for component in components[1:4]): - raise ValueError('Hour may only be 24 at 24:00:00.000') - - return components - - def _parse_tzstr(self, tzstr, zero_as_utc=True): - if tzstr == b'Z' or tzstr == b'z': - return tz.UTC - - if len(tzstr) not in {3, 5, 6}: - raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') - - if tzstr[0:1] == b'-': - mult = -1 - elif tzstr[0:1] == b'+': - mult = 1 - else: - raise ValueError('Time zone offset requires sign') - - hours = int(tzstr[1:3]) - if len(tzstr) == 3: - minutes = 0 - else: - minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP 
else 3):]) - - if zero_as_utc and hours == 0 and minutes == 0: - return tz.UTC - else: - if minutes > 59: - raise ValueError('Invalid minutes in time zone offset') - - if hours > 23: - raise ValueError('Invalid hours in time zone offset') - - return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) - - -DEFAULT_ISOPARSER = isoparser() -isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/flac_parse.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/flac_parse.h deleted file mode 100644 index 67a7320bea6f96e3bcb2ffbd5a1846ad0e4d28c5..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/flac_parse.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * FLAC (Free Lossless Audio Codec) decoder/parser common functions - * Copyright (c) 2008 Justin Ruggles - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * FLAC (Free Lossless Audio Codec) decoder/parser common functions - */ - -#ifndef AVCODEC_FLAC_PARSE_H -#define AVCODEC_FLAC_PARSE_H - -#include "avcodec.h" -#include "get_bits.h" - -typedef struct FLACStreaminfo { - int samplerate; /**< sample rate */ - int channels; /**< number of channels */ - int bps; /**< bits-per-sample */ - int max_blocksize; /**< maximum block size, in samples */ - int max_framesize; /**< maximum frame size, in bytes */ - int64_t samples; /**< total number of samples */ -} FLACStreaminfo; - -typedef struct FLACFrameInfo { - int samplerate; /**< sample rate */ - int channels; /**< number of channels */ - int bps; /**< bits-per-sample */ - int blocksize; /**< block size of the frame */ - int ch_mode; /**< channel decorrelation mode */ - int64_t frame_or_sample_num; /**< frame number or sample number */ - int is_var_size; /**< specifies if the stream uses variable - block sizes or a fixed block size; - also determines the meaning of - frame_or_sample_num */ -} FLACFrameInfo; - -/** - * Parse the Streaminfo metadata block - * @param[out] avctx codec context to set basic stream parameters - * @param[out] s where parsed information is stored - * @param[in] buffer pointer to start of 34-byte streaminfo data - * - * @return negative error code on faiure or >= 0 on success - */ -int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s, - const uint8_t *buffer); - -/** - * Validate the FLAC extradata. - * @param[in] avctx codec context containing the extradata. - * @param[out] format extradata format. - * @param[out] streaminfo_start pointer to start of 34-byte STREAMINFO data. - * @return 1 if valid, 0 if not valid. - */ -int ff_flac_is_extradata_valid(AVCodecContext *avctx, - uint8_t **streaminfo_start); - -/** - * Validate and decode a frame header. 
- * @param avctx AVCodecContext to use as av_log() context - * @param gb GetBitContext from which to read frame header - * @param[out] fi frame information - * @param log_level_offset log level offset. can be used to silence error messages. - * @return non-zero on error, 0 if ok - */ -int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb, - FLACFrameInfo *fi, int log_level_offset); - -void ff_flac_set_channel_layout(AVCodecContext *avctx, int channels); - -#endif /* AVCODEC_FLAC_PARSE_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h263dsp.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h263dsp.c deleted file mode 100644 index 8fa2d3c297f5293f27095a61b31af94cc4a80302..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h263dsp.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include - -#include "libavutil/attributes.h" -#include "libavutil/common.h" -#include "config.h" -#include "h263dsp.h" - -const uint8_t ff_h263_loop_filter_strength[32] = { - 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, - 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12 -}; - -static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale) -{ - int y; - const int strength = ff_h263_loop_filter_strength[qscale]; - - for (y = 0; y < 8; y++) { - int d1, d2, ad1; - int p0 = src[y * stride - 2]; - int p1 = src[y * stride - 1]; - int p2 = src[y * stride + 0]; - int p3 = src[y * stride + 1]; - int d = (p0 - p3 + 4 * (p2 - p1)) / 8; - - if (d < -2 * strength) - d1 = 0; - else if (d < -strength) - d1 = -2 * strength - d; - else if (d < strength) - d1 = d; - else if (d < 2 * strength) - d1 = 2 * strength - d; - else - d1 = 0; - - p1 += d1; - p2 -= d1; - if (p1 & 256) - p1 = ~(p1 >> 31); - if (p2 & 256) - p2 = ~(p2 >> 31); - - src[y * stride - 1] = p1; - src[y * stride + 0] = p2; - - ad1 = FFABS(d1) >> 1; - - d2 = av_clip((p0 - p3) / 4, -ad1, ad1); - - src[y * stride - 2] = p0 - d2; - src[y * stride + 1] = p3 + d2; - } -} - -static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale) -{ - int x; - const int strength = ff_h263_loop_filter_strength[qscale]; - - for (x = 0; x < 8; x++) { - int d1, d2, ad1; - int p0 = src[x - 2 * stride]; - int p1 = src[x - 1 * stride]; - int p2 = src[x + 0 * stride]; - int p3 = src[x + 1 * stride]; - int d = (p0 - p3 + 4 * (p2 - p1)) / 8; - - if (d < -2 * strength) - d1 = 0; - else if (d < -strength) - d1 = -2 * strength - d; - else if (d < strength) - d1 = d; - else if (d < 2 * strength) - d1 = 2 * strength - d; - else - d1 = 0; - - p1 += d1; - p2 -= d1; - if (p1 & 256) - p1 = ~(p1 >> 31); - if (p2 & 256) - p2 = ~(p2 >> 31); - - src[x - 1 * stride] = p1; - src[x + 0 * stride] = p2; - - ad1 = FFABS(d1) >> 1; - - 
d2 = av_clip((p0 - p3) / 4, -ad1, ad1); - - src[x - 2 * stride] = p0 - d2; - src[x + stride] = p3 + d2; - } -} - -av_cold void ff_h263dsp_init(H263DSPContext *ctx) -{ - ctx->h263_h_loop_filter = h263_h_loop_filter_c; - ctx->h263_v_loop_filter = h263_v_loop_filter_c; - -#if ARCH_X86 - ff_h263dsp_init_x86(ctx); -#elif ARCH_MIPS - ff_h263dsp_init_mips(ctx); -#endif -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/blockdsp_mips.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/blockdsp_mips.h deleted file mode 100644 index 1742b123c3a863ed3730fee52400865ca817900f..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/blockdsp_mips.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2015 Parag Salasakar (parag.salasakar@imgtec.com) - * Zhou Xiaoyong - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_MIPS_BLOCKDSP_MIPS_H -#define AVCODEC_MIPS_BLOCKDSP_MIPS_H - -#include "../mpegvideo.h" - -void ff_fill_block16_msa(uint8_t *src, uint8_t val, ptrdiff_t stride, int height); -void ff_fill_block8_msa(uint8_t *src, uint8_t val, ptrdiff_t stride, int height); -void ff_clear_block_msa(int16_t *block); -void ff_clear_blocks_msa(int16_t *block); - -void ff_fill_block16_mmi(uint8_t *block, uint8_t value, ptrdiff_t line_size, int h); -void ff_fill_block8_mmi(uint8_t *block, uint8_t value, ptrdiff_t line_size, int h); -void ff_clear_block_mmi(int16_t *block); -void ff_clear_blocks_mmi(int16_t *block); - -#endif // #ifndef AVCODEC_MIPS_BLOCKDSP_MIPS_H diff --git a/spaces/congsaPfin/Manga-OCR/Festo Fluidsim Pneumatics 3.6 With 4.2 Library Crack ((LINK)).md b/spaces/congsaPfin/Manga-OCR/Festo Fluidsim Pneumatics 3.6 With 4.2 Library Crack ((LINK)).md deleted file mode 100644 index 9f77864984ed32bd2f66953af503446e2b52375a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/Festo Fluidsim Pneumatics 3.6 With 4.2 Library Crack ((LINK)).md +++ /dev/null @@ -1,106 +0,0 @@ -## Festo Fluidsim Pneumatics 3.6 With 4.2 Library Crack - - - - - - ![Festo Fluidsim Pneumatics 3.6 With 4.2 Library Crack ((LINK))](https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcQNXFXsTHqWqNN4q11J606DhmO2yj4wGX2Z_Js0yqASaTU_yt8xTREnWFuc) - - - - - -**Click Here ⇒ [https://www.google.com/url?q=https%3A%2F%2Fbltlly.com%2F2tBP0J&sa=D&sntz=1&usg=AOvVaw2E9QxDRO55MnwbPsE1UdFV](https://www.google.com/url?q=https%3A%2F%2Fbltlly.com%2F2tBP0J&sa=D&sntz=1&usg=AOvVaw2E9QxDRO55MnwbPsE1UdFV)** - - - - - - - - - - - - - -# Festo Fluidsim Pneumatics 3.6 With 4.2 Library Crack: How to Download and Install - - - -If you are looking for a software that can help you design and simulate pneumatic circuits, you might be interested in Festo Fluidsim Pneumatics 3.6 with 4.2 library crack. 
This is a cracked version of the popular software that allows you to use it without paying for a license. - - - -In this article, we will show you how to download and install Festo Fluidsim Pneumatics 3.6 with 4.2 library crack on your computer. We will also explain some of the features and benefits of using this software for your pneumatic projects. - - - -## What is Festo Fluidsim Pneumatics 3.6 with 4.2 Library Crack? - - - -Festo Fluidsim Pneumatics 3.6 with 4.2 library crack is a software that lets you create and simulate pneumatic circuits using a graphical user interface. You can drag and drop components from a library of hundreds of pneumatic elements, such as valves, cylinders, sensors, compressors, and more. - - - -You can also edit the parameters of each component, such as pressure, flow rate, diameter, stroke, etc. You can then run the simulation and see how the circuit behaves in real time. You can also measure and analyze various variables, such as pressure, flow rate, force, speed, etc. - - - -Festo Fluidsim Pneumatics 3.6 with 4.2 library crack is a cracked version of the original software that was developed by Festo Didactic GmbH & Co. KG. The original software requires a license key to activate and use it. However, the cracked version bypasses the license verification and lets you use it for free. - - - -## Why Use Festo Fluidsim Pneumatics 3.6 with 4.2 Library Crack? - - - -There are many reasons why you might want to use Festo Fluidsim Pneumatics 3.6 with 4.2 library crack for your pneumatic projects. Some of them are: - - - -- It is easy to use and learn. You don't need any programming skills or prior knowledge of pneumatics to use it. - -- It is comprehensive and realistic. You can create complex and realistic pneumatic circuits using a wide range of components and parameters. - -- It is educational and fun. You can learn about the principles and applications of pneumatics by experimenting with different circuits and scenarios. - -- It is free and accessible. You don't have to pay for a license or register to use it. - - - -## How to Download and Install Festo Fluidsim Pneumatics 3.6 with 4.2 Library Crack? - - - -To download and install Festo Fluidsim Pneumatics 3.6 with 4.2 library crack on your computer, you need to follow these steps: - - - -1. Go to one of the websites that offer the download link for Festo Fluidsim Pneumatics 3.6 with 4.2 library crack[^1^] [^2^]. Be careful not to click on any ads or pop-ups that might redirect you to malicious sites. - -2. Download the file that contains the software and the crack. It should be a compressed file with a .rar or .zip extension. - -3. Extract the file using a program like WinRAR or 7-Zip. You should see two folders: one for the software and one for the crack. - -4. Open the folder for the software and run the setup.exe file. Follow the instructions on the screen to install the software on your computer. - -5. Open the folder for the crack and copy the file named adcmn.dll. - -6. Paste the file into the installation directory of Festo Fluidsim Pneumatics 3.6, which is usually C:\Program Files (x86)\Festo\FluidSIM Pneumatics\bin\ - -7. Replace the existing file when prompted. - -8. Launch Festo Fluidsim Pneumatics 3.6 from your desktop or start menu. - -9. 
Enjoy using Fest 145887f19f - - - - - - - - - diff --git a/spaces/congsaPfin/Manga-OCR/logs/Carrom Pool Mod Apk How to Get Unlimited Coins and Gems in Minutes.md b/spaces/congsaPfin/Manga-OCR/logs/Carrom Pool Mod Apk How to Get Unlimited Coins and Gems in Minutes.md deleted file mode 100644 index c56479b51379cdb2bce095d928c3e80d8b3dbfac..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Carrom Pool Mod Apk How to Get Unlimited Coins and Gems in Minutes.md +++ /dev/null @@ -1,121 +0,0 @@ - -

        Carrom Pool Free Mod Apk Download: How to Enjoy the Game with Unlimited Coins and Gems

        -

        If you are a fan of carrom, you might have heard of Carrom Pool, a popular online multiplayer game that lets you play carrom with your friends or other players from around the world. But did you know that you can also enjoy the game with unlimited coins and gems by downloading the Carrom Pool Mod Apk? In this article, we will tell you everything you need to know about Carrom Pool Mod Apk, including its features, benefits, and how to download and install it on your device. We will also share some tips and tricks to help you master the game and win every match.

        -

        carrom pool free mod apk download


        DOWNLOAD ✒ ✒ ✒ https://urlca.com/2uO6Vt



        -

        What is Carrom Pool?

        -

        Carrom Pool is a casual board game that simulates the real-life carrom game. It is developed by Miniclip, a leading game developer that also created other popular games like 8 Ball Pool, Soccer Stars, and Agar.io. Carrom Pool has two modes: Classic and Disc Pool. In Classic mode, you have to pot all your pieces before your opponent does. In Disc Pool mode, you have to pot only the pieces of your color. You can also play offline against the computer or online against other players in real-time. You can also join clubs, chat with other players, and participate in tournaments and events.

        -

        Features of Carrom Pool

        -

        Carrom Pool has many features that make it an enjoyable and addictive game. Some of them are:

        -
          -
        • Smooth and realistic physics that make the game feel like playing real carrom.
        • -
        • Stunning graphics and sound effects that enhance the gaming experience.
        • -
        • A variety of strikers and pucks to choose from, each with different attributes and abilities.
        • -
        • A reward system that gives you coins and gems for winning matches, completing missions, and opening chests.
        • -
        • A shop where you can buy new strikers, pucks, chests, power-ups, and boosters with coins and gems.
        • -
        • A leaderboard where you can see your rank and compare your performance with other players.
        • -
        -

        How to play Carrom Pool

        -

        Playing Carrom Pool is easy and fun. Here are the basic steps to play the game:

        -
          -
        1. Select a mode: Classic or Disc Pool.
        2. -
        3. Select a table: There are different tables with different entry fees and rewards.
        4. -
        5. Select an opponent: You can either play with a random player or invite a friend to play with you.
        6. -
        7. Start the match: You will see a board with four holes in the corners and some pieces on it. There will be a white piece called the striker that you can use to hit the other pieces.
        8. -
        9. Aim and shoot: Drag your finger on the screen to aim the striker. Release your finger to shoot. You can also adjust the power and direction of your shot by using the buttons on the screen.
        10. -
        11. Pot your pieces: Depending on the mode, you have to pot either all your pieces or only the pieces of your color. You can also pot the red piece called the queen for extra points.
        12. -
        13. Win the match: The first player to pot all their pieces wins the match. You will get coins and gems as rewards for winning.
        14. -
        -

        What is Carrom Pool Mod Apk?

        -

Carrom Pool Mod Apk is a modified version of the original Carrom Pool game. It makes winning effortless by giving players free access to all premium features, such as auto-aim, paid strikers, unlimited coins and gems, and many other mod features. With Carrom Pool Mod Apk, you can enjoy the game without worrying about running out of coins and gems or losing matches. You can also unlock and use any striker and puck you want, and customize them according to your preference. You can also use power-ups and boosters to enhance your gameplay and win every match easily.

        -

        Benefits of Carrom Pool Mod Apk

        -

        Carrom Pool Mod Apk has many benefits that make it a better choice than the original game. Some of them are:

        -
          -
        • It is free to download and install. You don't need to pay anything to enjoy the game.
        • -
        • It is safe and secure. You don't need to root your device or grant any permissions to use the mod apk.
        • -
        • It is compatible with all Android devices. You don't need to worry about the device specifications or compatibility issues.
        • -
        • It is updated regularly. You don't need to worry about missing out on the latest features or bug fixes.
        • -
        • It is easy to use. You don't need any technical skills or knowledge to use the mod apk.
        • -
        -

        How to download and install Carrom Pool Mod Apk

        -

        Downloading and installing Carrom Pool Mod Apk is simple and straightforward. Here are the steps to follow:

        -


        -
          -
        1. Click on the link below to download the Carrom Pool Mod Apk file on your device.
        2. -
        3. Go to your device settings and enable the option to install apps from unknown sources.
        4. -
        5. Locate the downloaded file in your file manager and tap on it to start the installation process.
        6. -
        7. Follow the instructions on the screen and wait for the installation to complete.
        8. -
        9. Launch the game and enjoy the mod features.
        10. -
        -

        Tips and tricks for Carrom Pool Mod Apk

        -

        To make the most out of Carrom Pool Mod Apk, you can follow these tips and tricks:

        -

        Choose the right striker

        -

        The striker is the most important piece in the game, as it determines how you hit the other pieces. Therefore, you should choose a striker that suits your play style and strategy. For example, if you want to hit hard and fast, you can choose a striker with high power and speed. If you want to hit accurately and precisely, you can choose a striker with high aim and control. You can also change the color and design of your striker to make it more appealing.

        -

        Aim and shoot wisely

        -

        Aiming and shooting are the core skills of the game, as they determine how you pot your pieces. Therefore, you should aim and shoot wisely, taking into account the angle, direction, power, and spin of your shot. You can also use the auto-aim feature of the mod apk to help you with this. However, you should not rely on it too much, as it may not always give you the best shot. You should also practice your shots regularly, as practice makes perfect.

        -

        Use power-ups and boosters

        -

        Power-ups and boosters are special items that can help you improve your gameplay and win more matches. For example, you can use the double coins power-up to double your coins after winning a match. You can use the time extender booster to extend your time limit for each shot. You can use the free hit booster to get a free hit without losing a turn. You can use the undo booster to undo your last shot if you make a mistake. You can also use the mod apk to get unlimited power-ups and boosters for free.

        -

        Conclusion

        -

        Carrom Pool is a fun and exciting game that lets you play carrom with other players online. However, if you want to enjoy the game with unlimited coins and gems, you can download Carrom Pool Mod Apk from our website. With Carrom Pool Mod Apk, you can unlock all premium features, such as auto-aim, paid strikers, unlimited coins and gems, and many other mod features for free. You can also use our tips and tricks to master the game and win every match easily. So what are you waiting for? Download Carrom Pool Mod Apk now and have fun!

        -

        FAQs

        -

        Here are some frequently asked questions about Carrom Pool Mod Apk:

        • Is Carrom Pool Mod Apk safe?
          Yes, Carrom Pool Mod Apk is safe and secure. It does not contain any viruses or malware that can harm your device or data. It also does not require any root access or permissions to work.

        • Is Carrom Pool Mod Apk legal?
          Yes, Carrom Pool Mod Apk is legal as long as you use it for personal and educational purposes only. However, it is not endorsed by or affiliated with the official game developer, Miniclip. Therefore, you should use it at your own risk and discretion.

        • Does Carrom Pool Mod Apk work on all devices?
          Yes, Carrom Pool Mod Apk works on all Android devices that support the original game. However, you should make sure that your device has enough storage space and meets the minimum system requirements to run the game smoothly.

        • How can I update Carrom Pool Mod Apk?
          You can update Carrom Pool Mod Apk by visiting our website and downloading the latest version of the mod apk file. You can also enable the auto-update feature of the mod apk to get notified whenever a new update is available.

        • How can I contact you if I have any questions or issues?
          You can contact us by leaving a comment below or sending us an email at [email]. We will try our best to respond to your queries and resolve your issues as soon as possible.

        -
        -
        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download 8 Ball Pool Aim Tool Pro Mod APK - The Ultimate Billiards Helper.md b/spaces/congsaPfin/Manga-OCR/logs/Download 8 Ball Pool Aim Tool Pro Mod APK - The Ultimate Billiards Helper.md deleted file mode 100644 index bab2244c0a51ce6f50ca8ebcfccff6d7ceee0905..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download 8 Ball Pool Aim Tool Pro Mod APK - The Ultimate Billiards Helper.md +++ /dev/null @@ -1,97 +0,0 @@ -
        -

        8 Ball Pool Aim Tool Pro Mod APK: A Guide for Beginners

        -

        If you are a fan of billiards or pool games, you might have heard of or played 8 Ball Pool, one of the most popular and addictive online multiplayer games in the world. But did you know that there is a way to improve your skills and accuracy in this game without spending hours practicing? Yes, you heard it right. There is an app called 8 Ball Pool Aim Tool Pro Mod APK that can help you become a master in this game. In this article, we will tell you everything you need to know about this app, including what it is, what features it has, how to download and install it, and how to use it. So, let's get started!

        -

        8 ball pool aim tool pro mod apk


        DOWNLOAD ★★★★★ https://urlca.com/2uO98E



        -

        What is 8 Ball Pool?

        -

        8 Ball Pool is a game developed by Miniclip that allows you to play online pool with millions of players from around the world. You can challenge your friends, join tournaments, win coins and cues, and rank up in the leaderboards. The game has different modes, such as 1-on-1, 9 Ball, and Minigames, and different tables, such as London, Sydney, Moscow, and Las Vegas. The game is fun and easy to play, but it also requires skill and strategy to win.

        -

        How to play 8 Ball Pool

        -

        The rules of 8 Ball Pool are simple. You have to pot all your balls (either solids or stripes) before your opponent does, and then pot the black ball (the 8 ball) to win the game. You have to use the white ball (the cue ball) to hit your balls and pot them. You can adjust the angle and power of your shot by dragging your finger on the screen. You can also use spin to control the movement of the cue ball after it hits a ball or a cushion. If you pot the cue ball or any ball other than your own, you will lose your turn and give your opponent a chance to play. If you pot the 8 ball before clearing your balls, you will lose the game.

        -

        Why you need an aim tool for 8 Ball Pool

        -

        As you can see, playing 8 Ball Pool requires a lot of precision and accuracy. You have to aim carefully and hit the balls at the right angle and speed to pot them. This can be quite challenging, especially if you are a beginner or if you are playing on a small screen. That's why you might need an aim tool for 8 Ball Pool. An aim tool is an app that can help you aim better and make more accurate shots in this game. It can show you where the cue ball will go after hitting a ball or a cushion, and how to adjust your shot accordingly. It can also help you make bank shots or cushion shots that are otherwise difficult to execute. With an aim tool, you can improve your skills and confidence in this game and win more matches.
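        To make the idea of an aim tool concrete, here is a rough sketch of the kind of geometry such a tool computes. It uses the standard "ghost ball" construction from billiards; the coordinates, ball radius, and function names are made up for illustration, and this is not the actual algorithm used by 8 Ball Pool or the aim tool.

```python
import math

BALL_RADIUS = 1.0  # illustrative units, not the real table scale

def ghost_ball_aim(cue, target, pocket, r=BALL_RADIUS):
    """Return the "ghost ball" point the cue ball's centre must reach so the
    target ball is driven straight toward the pocket, plus the aim direction.

    cue, target, pocket are (x, y) tuples. This is the textbook ghost-ball
    construction, not the app's internal method."""
    dx, dy = pocket[0] - target[0], pocket[1] - target[1]
    dist = math.hypot(dx, dy)
    if dist == 0:
        raise ValueError("target and pocket coincide")
    # Step back two ball radii from the target centre, along the target-to-pocket line.
    ghost = (target[0] - 2 * r * dx / dist, target[1] - 2 * r * dy / dist)
    # Direction the cue ball must travel: from its position toward the ghost point.
    aim = (ghost[0] - cue[0], ghost[1] - cue[1])
    return ghost, aim

if __name__ == "__main__":
    ghost, aim = ghost_ball_aim(cue=(0, 0), target=(10, 5), pocket=(20, 10))
    print("aim the cue ball at", ghost, "direction", aim)
```

        An on-screen aim line is essentially this cue-ball-to-ghost-point direction drawn over the table, which is why a tool that computes it can make shots easier to judge.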

        -

        What is 8 Ball Pool Aim Tool Pro Mod APK?

        -

        One of the best aim tools for 8 Ball Pool is called 8 Ball Pool Aim Tool Pro Mod APK. This is a modified version of the original 8 Ball Pool Aim Tool Pro app that has some extra features and benefits. It is not available on the Google Play Store, so you have to download it from a third-party source. But don't worry, we will guide you through the process of downloading and installing it safely and easily. Here are some of the features of 8 Ball Pool Aim Tool Pro Mod APK that make it worth trying.

        -

        8 ball pool aim expert apk download
        -8 ball pool aim tool pro mod free
        -8 ball pool aim assist hack apk
        -8 ball pool aim line mod apk
        -8 ball pool aim tool pro apk latest version
        -8 ball pool aim hack mod apk download
        -8 ball pool aim tool pro unlimited coins
        -8 ball pool aim master apk free download
        -8 ball pool aim trainer mod apk
        -8 ball pool aim tool pro no root
        -8 ball pool aim helper apk download
        -8 ball pool aim tool pro premium apk
        -8 ball pool aim cheat mod apk
        -8 ball pool aim guide mod apk
        -8 ball pool aim tool pro cracked apk
        -8 ball pool aim bot mod apk download
        -8 ball pool aim tool pro online generator
        -8 ball pool aim app apk free download
        -8 ball pool aim mod apk unlimited money
        -8 ball pool aim tool pro for ios
        -8 ball pool aim hack apk free download
        -8 ball pool aim tool pro full version
        -8 ball pool aim mod apk android 1
        -8 ball pool aim tool pro for pc
        -8 ball pool aim calculator apk download
        -8 ball pool aim tool pro vip apk
        -8 ball pool aim mod apk latest version
        -8 ball pool aim tool pro for iphone
        -8 ball pool aim indicator apk download
        -8 ball pool aim tool pro mod menu
        -8 ball pool aim mod apk no ban
        -8 ball pool aim tool pro for android
        -8 ball pool aim enhancer apk download
        -8 ball pool aim tool pro license key
        -8 ball pool aim mod apk rexdl
        -8 ball pool aim tool pro for mac
        -8 ball pool aim extension apk download
        -8 ball pool aim tool pro activation code
        -8 ball pool aim mod apk revdl
        -8 ball pool aim tool pro for windows
        -8 ball pool aim ruler apk download
        -8 ball pool aim tool pro coupon code
        -8 ball pool aim mod apk happymod
        -8 ball pool aim tool pro for chromebook
        -8 ball pool aiming expert for pc download

        -

        Features of 8 Ball Pool Aim Tool Pro Mod APK

        -

        Auto-aim and extend aim line

        -

        One of the main features of 8 Ball Pool Aim Tool Pro Mod APK is that it can automatically aim for you and show you the best possible shot. You don't have to drag your finger on the screen to adjust the angle and power of your shot. You just have to tap the screen and the app will do the rest. The app also extends the aim line beyond the normal limit, so you can see where the cue ball will go after hitting multiple balls or cushions. This can help you plan your shots better and avoid mistakes.

        -

        Bank shots and cushion shots

        -

        Another feature of 8 Ball Pool Aim Tool Pro Mod APK is that it can help you make bank shots and cushion shots that are otherwise hard to execute. Bank shots are shots where you hit the cue ball off a cushion and then pot a ball. Cushion shots are shots where you hit a ball off a cushion and then pot it. These shots can be very useful in certain situations, such as when your balls are blocked by your opponent's balls or when you want to surprise your opponent with a tricky shot. The app can show you how to angle your cue ball and how much power to use to make these shots successfully.
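        The usual way to reason about a bank shot is the mirror trick: reflect the pocket across the cushion and aim the ball at the reflection. The sketch below shows that calculation for a horizontal cushion; the coordinates are illustrative, and it ignores spin, friction, and ball radius, so it is a rough approximation rather than what the app actually computes.

```python
def bank_aim_point(ball, pocket, cushion_y):
    """Find where on a horizontal cushion (the line y = cushion_y) to aim so a
    ball bounces off the cushion and heads toward the pocket.

    Mirror trick: reflect the pocket across the cushion and aim at the
    reflection; the bounce point is where the ball-to-reflection line crosses
    the cushion. Spin and ball radius are ignored, so this is only a first cut."""
    bx, by = ball
    px, py = pocket
    mirrored_py = 2 * cushion_y - py           # pocket reflected across the cushion
    if by == mirrored_py:
        raise ValueError("ball and mirrored pocket are at the same height")
    t = (cushion_y - by) / (mirrored_py - by)  # fraction of the way to the mirror image
    bounce_x = bx + t * (px - bx)
    return bounce_x, cushion_y

if __name__ == "__main__":
    # Ball at (2, 1), pocket at (8, 1.5), cushion along y = 4 -> bounce near x = 5.3.
    print(bank_aim_point(ball=(2.0, 1.0), pocket=(8.0, 1.5), cushion_y=4.0))
```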

        -

        No ads and no root required

        -

        A final feature of 8 Ball Pool Aim Tool Pro Mod APK is that it shows no ads and requires no root access. Ads can be annoying and distracting when you are playing a game, especially if they pop up in the middle of a match. The app removes all the ads from the original app, so you can enjoy a smooth and uninterrupted gaming experience. The app also does not require root access to work, which means you don't have to modify your device's system settings or risk damaging it. You can use the app without any worries or hassles.

        -

        How to download and install 8 Ball Pool Aim Tool Pro Mod APK

        -

        Now that you know what 8 Ball Pool Aim Tool Pro Mod APK is and what features it has, you might be wondering how to download and install it on your device. Well, it's not very difficult, but you have to follow some steps carefully. Here are the steps you need to follow:

        -

        Step 1: Download the APK file from a trusted source

        -

        The first step is to download the APK file of 8 Ball Pool Aim Tool Pro Mod APK from a trusted source. You can't find this app on the Google Play Store, so you have to look for it on other websites. However, not all websites are safe and reliable, so you have to be careful. Some websites might have fake or malicious files that can harm your device or steal your data. To avoid this, we recommend you download the APK file from [this link], which is verified and tested by us.
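        Whatever source you use, it is worth checking that the file you received matches the checksum that source publishes before installing it. The snippet below is a generic sketch: the filename and the expected SHA-256 value are placeholders, since this article does not publish an official checksum.

```python
import hashlib
from pathlib import Path

# Placeholders: substitute the filename and hash published by the source you trust.
APK_PATH = Path("8ball_aim_tool_pro_mod.apk")
EXPECTED_SHA256 = "replace-with-the-published-sha256-hex-digest"

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so large APKs don't need to fit in memory."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

if __name__ == "__main__":
    digest = sha256_of(APK_PATH)
    print("computed:", digest)
    print("match" if digest == EXPECTED_SHA256 else "MISMATCH - do not install this file")
```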

        -

        Step 2: Enable unknown sources on your device

        -

        The second step is to enable unknown sources on your device. This is a security setting that prevents you from installing apps from sources other than the Google Play Store. However, since we are installing an app from a third-party source, we have to enable this option temporarily. To do this, go to your device's settings, then security, then unknown sources, and toggle it on. You might see a warning message, but don't worry, it's just a precaution. You can disable this option later after installing the app.

        -

        Step 3: Install the APK file and launch the app

        -

        The third step is to install the APK file and launch the app. To do this, go to your device's file manager, then locate the downloaded APK file, then tap on it. You might see a confirmation message, then tap on install. Wait for a few seconds until the installation is complete, then tap on open. Congratulations! You have successfully installed 8 Ball Pool Aim Tool Pro Mod APK on your device.

        -

        How to use 8 Ball Pool Aim Tool Pro Mod APK

        -

        The final step is to use 8 Ball Pool Aim Tool Pro Mod APK and enjoy its features. To do this, follow these steps:

        -

        Step 1: Open the app and grant the necessary permissions

        -

        When you open the app for the first time, you will see a pop-up message asking you to grant some permissions to the app. These permissions are necessary for the app to work properly and access your device's features. For example, the app needs to access your storage to save the APK file, your camera to scan the QR code, and the draw-over-other-apps (overlay) permission to show the aim tool on the screen. Tap on allow for each permission and proceed to the next step.

        -

        Step 2: Select the game mode and table you want to play on

        -

        After granting the permissions, you will see the main interface of the app. Here, you can select the game mode and table you want to play on. You can choose from 1-on-1, 9 Ball, and Minigames modes, and from different tables with different bet amounts and rules. Tap on the mode and table you want to play on and wait for the app to find an opponent for you.

        -

        Step 3: Tap the screen to activate the aim tool and adjust the angle and power of your shot

        -

        Once you are in a match, you can tap the screen to activate the aim tool and adjust the angle and power of your shot. You will see a green line extending from the cue ball to the target ball, showing you where the cue ball will go after hitting it. You will also see a yellow line extending from the target ball to the pocket, showing you where the target ball will go after being hit. You can drag your finger on the screen to change the angle of your shot and use the power bar on the left side of the screen to change the power of your shot. You can also use spin by tapping on the cue ball icon on the right side of the screen and moving it around. When you are ready, tap on the confirm button on the bottom right corner of the screen and watch your shot.

        -

        Conclusion

        -

        8 Ball Pool Aim Tool Pro Mod APK is a great app for anyone who loves playing 8 Ball Pool and wants to improve their skills and accuracy in this game. It can help you aim better, make more accurate shots, and win more matches. It has many features, such as auto-aim, extend aim line, bank shots, cushion shots, no ads, and no root required. It is easy to download, install, and use. All you have to do is follow our guide and enjoy this app. So, what are you waiting for? Download 8 Ball Pool Aim Tool Pro Mod APK today and become a master in this game!

        -

        FAQs

        -

        Here are some of the frequently asked questions about 8 Ball Pool Aim Tool Pro Mod APK:

        -
        • Is 8 Ball Pool Aim Tool Pro Mod APK safe?
          Yes, 8 Ball Pool Aim Tool Pro Mod APK is safe to use. It does not contain any viruses or malware that can harm your device or steal your data. However, you have to download it from a trusted source, such as [this link], and enable unknown sources on your device before installing it.

        • Is 8 Ball Pool Aim Tool Pro Mod APK legal?
          Well, this is a tricky question. 8 Ball Pool Aim Tool Pro Mod APK is not an official app from Miniclip, the developer of 8 Ball Pool. It is a modified version of an original app that has some extra features and benefits. Therefore, it might violate some terms and conditions of Miniclip or Google Play Store. However, we have not heard of any cases where users have been banned or penalized for using this app. So, use it at your own risk.

        • Does 8 Ball Pool Aim Tool Pro Mod APK work with all devices?
          8 Ball Pool Aim Tool Pro Mod APK works with most Android devices that have Android 4.1 or higher versions. However, some devices might not be compatible with this app due to different specifications or settings. If you encounter any problems while using this app, such as crashes or errors, try updating your device's software or clearing your cache.

        • Does 8 Ball Pool Aim Tool Pro Mod APK require an internet connection?
          Yes, 8 Ball Pool Aim Tool Pro Mod APK requires an internet connection to work properly. This is because 8 Ball Pool is an online multiplayer game that connects you with other players from around the world. Therefore, you need a stable and fast internet connection to play this game smoothly and without interruptions.

        • Can I use 8 Ball Pool Aim Tool Pro Mod APK with other apps?
          No, you cannot use 8 Ball Pool Aim Tool Pro Mod APK with other apps that modify or interfere with 8 Ball Pool or its features. For example, you cannot use this app with other modded apps, hacks, cheats, or bots that claim to give you unlimited coins, cues, cash, or other resources. These apps are not safe and can get you banned or suspended from the game. Therefore, use 8 Ball Pool Aim Tool Pro Mod APK only with the official 8 Ball Pool app from Miniclip.

          -
          -
          \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/README.md b/spaces/contluForse/HuggingGPT/README.md deleted file mode 100644 index 6f3b26f0d1afa6c334487fbb3c13bc80fb7e5929..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: HuggingGPT -emoji: 😻 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -duplicated_from: erfanzar/HuggingGPT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/contluForse/HuggingGPT/assets/DCS World War II Assets Pack Full Crack [hack] [UPD].md b/spaces/contluForse/HuggingGPT/assets/DCS World War II Assets Pack Full Crack [hack] [UPD].md deleted file mode 100644 index 6a3c112dad08bef548f1116a29ace2dcc4ef6b10..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/DCS World War II Assets Pack Full Crack [hack] [UPD].md +++ /dev/null @@ -1,12 +0,0 @@ -

          DCS: World War II Assets Pack full crack [hack]


          DOWNLOAD https://ssurll.com/2uzwjr



          -
          -November 23, 2017 - The DCS: World War II Assets Pack provides numerous World War II air, land, and sea assets to populate Normandy and other DCS World maps. -There is no information for this page. Find out why this happened. -A list of all the cards you can buy will be posted here. -This page is under construction, so we keep the list open. -We will keep it up to date, but it will still take some time, depending on how quickly you update the information. -You can buy the cards on Amazon, but most of them are sold individually. -Cards you can purchase -Below is a list of cards available for purchase.
          -
          -
          -

          diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/midas/utils.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/midas/utils.py deleted file mode 100644 index 9a9d3b5b66370fa98da9e067ba53ead848ea9a59..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/midas/utils.py +++ /dev/null @@ -1,189 +0,0 @@ -"""Utils for monoDepth.""" -import sys -import re -import numpy as np -import cv2 -import torch - - -def read_pfm(path): - """Read pfm file. - - Args: - path (str): path to file - - Returns: - tuple: (data, scale) - """ - with open(path, "rb") as file: - - color = None - width = None - height = None - scale = None - endian = None - - header = file.readline().rstrip() - if header.decode("ascii") == "PF": - color = True - elif header.decode("ascii") == "Pf": - color = False - else: - raise Exception("Not a PFM file: " + path) - - dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii")) - if dim_match: - width, height = list(map(int, dim_match.groups())) - else: - raise Exception("Malformed PFM header.") - - scale = float(file.readline().decode("ascii").rstrip()) - if scale < 0: - # little-endian - endian = "<" - scale = -scale - else: - # big-endian - endian = ">" - - data = np.fromfile(file, endian + "f") - shape = (height, width, 3) if color else (height, width) - - data = np.reshape(data, shape) - data = np.flipud(data) - - return data, scale - - -def write_pfm(path, image, scale=1): - """Write pfm file. - - Args: - path (str): pathto file - image (array): data - scale (int, optional): Scale. Defaults to 1. - """ - - with open(path, "wb") as file: - color = None - - if image.dtype.name != "float32": - raise Exception("Image dtype must be float32.") - - image = np.flipud(image) - - if len(image.shape) == 3 and image.shape[2] == 3: # color image - color = True - elif ( - len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1 - ): # greyscale - color = False - else: - raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.") - - file.write("PF\n" if color else "Pf\n".encode()) - file.write("%d %d\n".encode() % (image.shape[1], image.shape[0])) - - endian = image.dtype.byteorder - - if endian == "<" or endian == "=" and sys.byteorder == "little": - scale = -scale - - file.write("%f\n".encode() % scale) - - image.tofile(file) - - -def read_image(path): - """Read image and output RGB image (0-1). - - Args: - path (str): path to file - - Returns: - array: RGB image (0-1) - """ - img = cv2.imread(path) - - if img.ndim == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0 - - return img - - -def resize_image(img): - """Resize image and make it fit for network. - - Args: - img (array): image - - Returns: - tensor: data ready for network - """ - height_orig = img.shape[0] - width_orig = img.shape[1] - - if width_orig > height_orig: - scale = width_orig / 384 - else: - scale = height_orig / 384 - - height = (np.ceil(height_orig / scale / 32) * 32).astype(int) - width = (np.ceil(width_orig / scale / 32) * 32).astype(int) - - img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA) - - img_resized = ( - torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float() - ) - img_resized = img_resized.unsqueeze(0) - - return img_resized - - -def resize_depth(depth, width, height): - """Resize depth map and bring to CPU (numpy). 
- - Args: - depth (tensor): depth - width (int): image width - height (int): image height - - Returns: - array: processed depth - """ - depth = torch.squeeze(depth[0, :, :, :]).to("cpu") - - depth_resized = cv2.resize( - depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC - ) - - return depth_resized - -def write_depth(path, depth, bits=1): - """Write depth map to pfm and png file. - - Args: - path (str): filepath without extension - depth (array): depth - """ - write_pfm(path + ".pfm", depth.astype(np.float32)) - - depth_min = depth.min() - depth_max = depth.max() - - max_val = (2**(8*bits))-1 - - if depth_max - depth_min > np.finfo("float").eps: - out = max_val * (depth - depth_min) / (depth_max - depth_min) - else: - out = np.zeros(depth.shape, dtype=depth.type) - - if bits == 1: - cv2.imwrite(path + ".png", out.astype("uint8")) - elif bits == 2: - cv2.imwrite(path + ".png", out.astype("uint16")) - - return diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/checkpoint/detection_checkpoint.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/checkpoint/detection_checkpoint.py deleted file mode 100644 index 7d411e54bd5e004504423ba052db6f85ec511f72..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/checkpoint/detection_checkpoint.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import os -import pickle -from urllib.parse import parse_qs, urlparse -import torch -from fvcore.common.checkpoint import Checkpointer -from torch.nn.parallel import DistributedDataParallel - -import annotator.oneformer.detectron2.utils.comm as comm -from annotator.oneformer.detectron2.utils.file_io import PathManager - -from .c2_model_loading import align_and_update_state_dicts - - -class DetectionCheckpointer(Checkpointer): - """ - Same as :class:`Checkpointer`, but is able to: - 1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models. - 2. correctly load checkpoints that are only available on the master worker - """ - - def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables): - is_main_process = comm.is_main_process() - super().__init__( - model, - save_dir, - save_to_disk=is_main_process if save_to_disk is None else save_to_disk, - **checkpointables, - ) - self.path_manager = PathManager - self._parsed_url_during_load = None - - def load(self, path, *args, **kwargs): - assert self._parsed_url_during_load is None - need_sync = False - logger = logging.getLogger(__name__) - logger.info("[DetectionCheckpointer] Loading from {} ...".format(path)) - - if path and isinstance(self.model, DistributedDataParallel): - path = self.path_manager.get_local_path(path) - has_file = os.path.isfile(path) - all_has_file = comm.all_gather(has_file) - if not all_has_file[0]: - raise OSError(f"File {path} not found on main worker.") - if not all(all_has_file): - logger.warning( - f"Not all workers can read checkpoint {path}. " - "Training may fail to fully resume." - ) - # TODO: broadcast the checkpoint file contents from main - # worker, and load from it instead. 
- need_sync = True - if not has_file: - path = None # don't load if not readable - - if path: - parsed_url = urlparse(path) - self._parsed_url_during_load = parsed_url - path = parsed_url._replace(query="").geturl() # remove query from filename - path = self.path_manager.get_local_path(path) - - self.logger.setLevel('CRITICAL') - ret = super().load(path, *args, **kwargs) - - if need_sync: - logger.info("Broadcasting model states from main worker ...") - self.model._sync_params_and_buffers() - self._parsed_url_during_load = None # reset to None - return ret - - def _load_file(self, filename): - if filename.endswith(".pkl"): - with PathManager.open(filename, "rb") as f: - data = pickle.load(f, encoding="latin1") - if "model" in data and "__author__" in data: - # file is in Detectron2 model zoo format - self.logger.info("Reading a file from '{}'".format(data["__author__"])) - return data - else: - # assume file is from Caffe2 / Detectron1 model zoo - if "blobs" in data: - # Detection models have "blobs", but ImageNet models don't - data = data["blobs"] - data = {k: v for k, v in data.items() if not k.endswith("_momentum")} - return {"model": data, "__author__": "Caffe2", "matching_heuristics": True} - elif filename.endswith(".pyth"): - # assume file is from pycls; no one else seems to use the ".pyth" extension - with PathManager.open(filename, "rb") as f: - data = torch.load(f) - assert ( - "model_state" in data - ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'." - model_state = { - k: v - for k, v in data["model_state"].items() - if not k.endswith("num_batches_tracked") - } - return {"model": model_state, "__author__": "pycls", "matching_heuristics": True} - - loaded = self._torch_load(filename) - if "model" not in loaded: - loaded = {"model": loaded} - assert self._parsed_url_during_load is not None, "`_load_file` must be called inside `load`" - parsed_url = self._parsed_url_during_load - queries = parse_qs(parsed_url.query) - if queries.pop("matching_heuristics", "False") == ["True"]: - loaded["matching_heuristics"] = True - if len(queries) > 0: - raise ValueError( - f"Unsupported query remaining: f{queries}, orginal filename: {parsed_url.geturl()}" - ) - return loaded - - def _torch_load(self, f): - return super()._load_file(f) - - def _load_model(self, checkpoint): - if checkpoint.get("matching_heuristics", False): - self._convert_ndarray_to_tensor(checkpoint["model"]) - # convert weights by name-matching heuristics - checkpoint["model"] = align_and_update_state_dicts( - self.model.state_dict(), - checkpoint["model"], - c2_conversion=checkpoint.get("__author__", None) == "Caffe2", - ) - # for non-caffe2 models, use standard ways to load it - incompatible = super()._load_model(checkpoint) - - model_buffers = dict(self.model.named_buffers(recurse=False)) - for k in ["pixel_mean", "pixel_std"]: - # Ignore missing key message about pixel_mean/std. - # Though they may be missing in old checkpoints, they will be correctly - # initialized from config anyway. - if k in model_buffers: - try: - incompatible.missing_keys.remove(k) - except ValueError: - pass - for k in incompatible.unexpected_keys[:]: - # Ignore unexpected keys about cell anchors. They exist in old checkpoints - # but now they are non-persistent buffers and will not be in new checkpoints. 
- if "anchor_generator.cell_anchors" in k: - incompatible.unexpected_keys.remove(k) - return incompatible diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/modeling/backbone/__init__.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/modeling/backbone/__init__.py deleted file mode 100644 index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/modeling/backbone/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/depth_model.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/depth_model.py deleted file mode 100644 index fc421c108ea3928c9add62b4c190500d9bd4eda1..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/depth_model.py +++ /dev/null @@ -1,152 +0,0 @@ -# MIT License - -# Copyright (c) 2022 Intelligent Systems Lab Org - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -# File author: Shariq Farooq Bhat - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchvision import transforms -import PIL.Image -from PIL import Image -from typing import Union - - -class DepthModel(nn.Module): - def __init__(self): - super().__init__() - self.device = 'cpu' - - def to(self, device) -> nn.Module: - self.device = device - return super().to(device) - - def forward(self, x, *args, **kwargs): - raise NotImplementedError - - def _infer(self, x: torch.Tensor): - """ - Inference interface for the model - Args: - x (torch.Tensor): input tensor of shape (b, c, h, w) - Returns: - torch.Tensor: output tensor of shape (b, 1, h, w) - """ - return self(x)['metric_depth'] - - def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode="reflect", **kwargs) -> torch.Tensor: - """ - Inference interface for the model with padding augmentation - Padding augmentation fixes the boundary artifacts in the output depth map. - Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image. 
- This augmentation pads the input image and crops the prediction back to the original size / view. - - Note: This augmentation is not required for the models trained with 'avoid_boundary'=True. - Args: - x (torch.Tensor): input tensor of shape (b, c, h, w) - pad_input (bool, optional): whether to pad the input or not. Defaults to True. - fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3. - fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3. - upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'. - padding_mode (str, optional): padding mode. Defaults to "reflect". - Returns: - torch.Tensor: output tensor of shape (b, 1, h, w) - """ - # assert x is nchw and c = 3 - assert x.dim() == 4, "x must be 4 dimensional, got {}".format(x.dim()) - assert x.shape[1] == 3, "x must have 3 channels, got {}".format(x.shape[1]) - - if pad_input: - assert fh > 0 or fw > 0, "atlease one of fh and fw must be greater than 0" - pad_h = int(np.sqrt(x.shape[2]/2) * fh) - pad_w = int(np.sqrt(x.shape[3]/2) * fw) - padding = [pad_w, pad_w] - if pad_h > 0: - padding += [pad_h, pad_h] - - x = F.pad(x, padding, mode=padding_mode, **kwargs) - out = self._infer(x) - if out.shape[-2:] != x.shape[-2:]: - out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False) - if pad_input: - # crop to the original size, handling the case where pad_h and pad_w is 0 - if pad_h > 0: - out = out[:, :, pad_h:-pad_h,:] - if pad_w > 0: - out = out[:, :, :, pad_w:-pad_w] - return out - - def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor: - """ - Inference interface for the model with horizontal flip augmentation - Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip. - Args: - x (torch.Tensor): input tensor of shape (b, c, h, w) - pad_input (bool, optional): whether to use padding augmentation. Defaults to True. - Returns: - torch.Tensor: output tensor of shape (b, 1, h, w) - """ - # infer with horizontal flip and average - out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs) - out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs) - out = (out + torch.flip(out_flip, dims=[3])) / 2 - return out - - def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor: - """ - Inference interface for the model - Args: - x (torch.Tensor): input tensor of shape (b, c, h, w) - pad_input (bool, optional): whether to use padding augmentation. Defaults to True. - with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True. - Returns: - torch.Tensor: output tensor of shape (b, 1, h, w) - """ - if with_flip_aug: - return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs) - else: - return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs) - - @torch.no_grad() - def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str="numpy", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]: - """ - Inference interface for the model for PIL image - Args: - pil_img (PIL.Image.Image): input PIL image - pad_input (bool, optional): whether to use padding augmentation. Defaults to True. - with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True. - output_type (str, optional): output type. 
Supported values are 'numpy', 'pil' and 'tensor'. Defaults to "numpy". - """ - x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device) - out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs) - if output_type == "numpy": - return out_tensor.squeeze().cpu().numpy() - elif output_type == "pil": - # uint16 is required for depth pil image - out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16) - return Image.fromarray(out_16bit_numpy) - elif output_type == "tensor": - return out_tensor.squeeze().cpu() - else: - raise ValueError(f"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'") - \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/trainers/base_trainer.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/trainers/base_trainer.py deleted file mode 100644 index 982271270967c75ee9092b69140a7067dacc39ce..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/trainers/base_trainer.py +++ /dev/null @@ -1,327 +0,0 @@ -# MIT License - -# Copyright (c) 2022 Intelligent Systems Lab Org - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -# File author: Shariq Farooq Bhat - -import os -import uuid -import warnings -from datetime import datetime as dt -from typing import Dict - -import matplotlib.pyplot as plt -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn -import torch.optim as optim -import wandb -from tqdm import tqdm - -from zoedepth.utils.config import flatten -from zoedepth.utils.misc import RunningAverageDict, colorize, colors - - -def is_rank_zero(args): - return args.rank == 0 - - -class BaseTrainer: - def __init__(self, config, model, train_loader, test_loader=None, device=None): - """ Base Trainer class for training a model.""" - - self.config = config - self.metric_criterion = "abs_rel" - if device is None: - device = torch.device( - 'cuda') if torch.cuda.is_available() else torch.device('cpu') -# device = torch.device('cpu') - self.device = device - self.model = model - self.train_loader = train_loader - self.test_loader = test_loader - self.optimizer = self.init_optimizer() - self.scheduler = self.init_scheduler() - - def resize_to_target(self, prediction, target): - if prediction.shape[2:] != target.shape[-2:]: - prediction = nn.functional.interpolate( - prediction, size=target.shape[-2:], mode="bilinear", align_corners=True - ) - return prediction - - def load_ckpt(self, checkpoint_dir="./checkpoints", ckpt_type="best"): - import glob - import os - - from zoedepth.models.model_io import load_wts - - if hasattr(self.config, "checkpoint"): - checkpoint = self.config.checkpoint - elif hasattr(self.config, "ckpt_pattern"): - pattern = self.config.ckpt_pattern - matches = glob.glob(os.path.join( - checkpoint_dir, f"*{pattern}*{ckpt_type}*")) - if not (len(matches) > 0): - raise ValueError(f"No matches found for the pattern {pattern}") - checkpoint = matches[0] - else: - return - model = load_wts(self.model, checkpoint) - # TODO : Resuming training is not properly supported in this repo. Implement loading / saving of optimizer and scheduler to support it. - print("Loaded weights from {0}".format(checkpoint)) - warnings.warn( - "Resuming training is not properly supported in this repo. Implement loading / saving of optimizer and scheduler to support it.") - self.model = model - - def init_optimizer(self): - m = self.model.module if self.config.multigpu else self.model - - if self.config.same_lr: - print("Using same LR") - if hasattr(m, 'core'): - m.core.unfreeze() - params = self.model.parameters() - else: - print("Using diff LR") - if not hasattr(m, 'get_lr_params'): - raise NotImplementedError( - f"Model {m.__class__.__name__} does not implement get_lr_params. 
Please implement it or use the same LR for all parameters.") - - params = m.get_lr_params(self.config.lr) - - return optim.AdamW(params, lr=self.config.lr, weight_decay=self.config.wd) - - def init_scheduler(self): - lrs = [l['lr'] for l in self.optimizer.param_groups] - return optim.lr_scheduler.OneCycleLR(self.optimizer, lrs, epochs=self.config.epochs, steps_per_epoch=len(self.train_loader), - cycle_momentum=self.config.cycle_momentum, - base_momentum=0.85, max_momentum=0.95, div_factor=self.config.div_factor, final_div_factor=self.config.final_div_factor, pct_start=self.config.pct_start, three_phase=self.config.three_phase) - - def train_on_batch(self, batch, train_step): - raise NotImplementedError - - def validate_on_batch(self, batch, val_step): - raise NotImplementedError - - def raise_if_nan(self, losses): - for key, value in losses.items(): - if torch.isnan(value): - raise ValueError(f"{key} is NaN, Stopping training") - - @property - def iters_per_epoch(self): - return len(self.train_loader) - - @property - def total_iters(self): - return self.config.epochs * self.iters_per_epoch - - def should_early_stop(self): - if self.config.get('early_stop', False) and self.step > self.config.early_stop: - return True - - def train(self): - print(f"Training {self.config.name}") - if self.config.uid is None: - self.config.uid = str(uuid.uuid4()).split('-')[-1] - run_id = f"{dt.now().strftime('%d-%h_%H-%M')}-{self.config.uid}" - self.config.run_id = run_id - self.config.experiment_id = f"{self.config.name}{self.config.version_name}_{run_id}" - self.should_write = ((not self.config.distributed) - or self.config.rank == 0) - self.should_log = self.should_write # and logging - if self.should_log: - tags = self.config.tags.split( - ',') if self.config.tags != '' else None - wandb.init(project=self.config.project, name=self.config.experiment_id, config=flatten(self.config), dir=self.config.root, - tags=tags, notes=self.config.notes, settings=wandb.Settings(start_method="fork")) - - self.model.train() - self.step = 0 - best_loss = np.inf - validate_every = int(self.config.validate_every * self.iters_per_epoch) - - - if self.config.prefetch: - - for i, batch in tqdm(enumerate(self.train_loader), desc=f"Prefetching...", - total=self.iters_per_epoch) if is_rank_zero(self.config) else enumerate(self.train_loader): - pass - - losses = {} - def stringify_losses(L): return "; ".join(map( - lambda kv: f"{colors.fg.purple}{kv[0]}{colors.reset}: {round(kv[1].item(),3):.4e}", L.items())) - for epoch in range(self.config.epochs): - if self.should_early_stop(): - break - - self.epoch = epoch - ################################# Train loop ########################################################## - if self.should_log: - wandb.log({"Epoch": epoch}, step=self.step) - pbar = tqdm(enumerate(self.train_loader), desc=f"Epoch: {epoch + 1}/{self.config.epochs}. Loop: Train", - total=self.iters_per_epoch) if is_rank_zero(self.config) else enumerate(self.train_loader) - for i, batch in pbar: - if self.should_early_stop(): - print("Early stopping") - break - # print(f"Batch {self.step+1} on rank {self.config.rank}") - losses = self.train_on_batch(batch, i) - # print(f"trained batch {self.step+1} on rank {self.config.rank}") - - self.raise_if_nan(losses) - if is_rank_zero(self.config) and self.config.print_losses: - pbar.set_description( - f"Epoch: {epoch + 1}/{self.config.epochs}. Loop: Train. 
Losses: {stringify_losses(losses)}") - self.scheduler.step() - - if self.should_log and self.step % 50 == 0: - wandb.log({f"Train/{name}": loss.item() - for name, loss in losses.items()}, step=self.step) - - self.step += 1 - - ######################################################################################################## - - if self.test_loader: - if (self.step % validate_every) == 0: - self.model.eval() - if self.should_write: - self.save_checkpoint( - f"{self.config.experiment_id}_latest.pt") - - ################################# Validation loop ################################################## - # validate on the entire validation set in every process but save only from rank 0, I know, inefficient, but avoids divergence of processes - metrics, test_losses = self.validate() - # print("Validated: {}".format(metrics)) - if self.should_log: - wandb.log( - {f"Test/{name}": tloss for name, tloss in test_losses.items()}, step=self.step) - - wandb.log({f"Metrics/{k}": v for k, - v in metrics.items()}, step=self.step) - - if (metrics[self.metric_criterion] < best_loss) and self.should_write: - self.save_checkpoint( - f"{self.config.experiment_id}_best.pt") - best_loss = metrics[self.metric_criterion] - - self.model.train() - - if self.config.distributed: - dist.barrier() - # print(f"Validated: {metrics} on device {self.config.rank}") - - # print(f"Finished step {self.step} on device {self.config.rank}") - ################################################################################################# - - # Save / validate at the end - self.step += 1 # log as final point - self.model.eval() - self.save_checkpoint(f"{self.config.experiment_id}_latest.pt") - if self.test_loader: - - ################################# Validation loop ################################################## - metrics, test_losses = self.validate() - # print("Validated: {}".format(metrics)) - if self.should_log: - wandb.log({f"Test/{name}": tloss for name, - tloss in test_losses.items()}, step=self.step) - wandb.log({f"Metrics/{k}": v for k, - v in metrics.items()}, step=self.step) - - if (metrics[self.metric_criterion] < best_loss) and self.should_write: - self.save_checkpoint( - f"{self.config.experiment_id}_best.pt") - best_loss = metrics[self.metric_criterion] - - self.model.train() - - def validate(self): - with torch.no_grad(): - losses_avg = RunningAverageDict() - metrics_avg = RunningAverageDict() - for i, batch in tqdm(enumerate(self.test_loader), desc=f"Epoch: {self.epoch + 1}/{self.config.epochs}. 
Loop: Validation", total=len(self.test_loader), disable=not is_rank_zero(self.config)): - metrics, losses = self.validate_on_batch(batch, val_step=i) - - if losses: - losses_avg.update(losses) - if metrics: - metrics_avg.update(metrics) - - return metrics_avg.get_value(), losses_avg.get_value() - - def save_checkpoint(self, filename): - if not self.should_write: - return - root = self.config.save_dir - if not os.path.isdir(root): - os.makedirs(root) - - fpath = os.path.join(root, filename) - m = self.model.module if self.config.multigpu else self.model - torch.save( - { - "model": m.state_dict(), - "optimizer": None, # TODO : Change to self.optimizer.state_dict() if resume support is needed, currently None to reduce file size - "epoch": self.epoch - }, fpath) - - def log_images(self, rgb: Dict[str, list] = {}, depth: Dict[str, list] = {}, scalar_field: Dict[str, list] = {}, prefix="", scalar_cmap="jet", min_depth=None, max_depth=None): - if not self.should_log: - return - - if min_depth is None: - try: - min_depth = self.config.min_depth - max_depth = self.config.max_depth - except AttributeError: - min_depth = None - max_depth = None - - depth = {k: colorize(v, vmin=min_depth, vmax=max_depth) - for k, v in depth.items()} - scalar_field = {k: colorize( - v, vmin=None, vmax=None, cmap=scalar_cmap) for k, v in scalar_field.items()} - images = {**rgb, **depth, **scalar_field} - wimages = { - prefix+"Predictions": [wandb.Image(v, caption=k) for k, v in images.items()]} - wandb.log(wimages, step=self.step) - - def log_line_plot(self, data): - if not self.should_log: - return - - plt.plot(data) - plt.ylabel("Scale factors") - wandb.log({"Scale factors": wandb.Image(plt)}, step=self.step) - plt.close() - - def log_bar_plot(self, title, labels, values): - if not self.should_log: - return - - data = [[label, val] for (label, val) in zip(labels, values)] - table = wandb.Table(data=data, columns=["label", "value"]) - wandb.log({title: wandb.plot.bar(table, "label", - "value", title=title)}, step=self.step) diff --git a/spaces/cozyanduofen/bingo/src/components/chat-attachments.tsx b/spaces/cozyanduofen/bingo/src/components/chat-attachments.tsx deleted file mode 100644 index ef43d4e262935d263b6099138c56f7daade5299d..0000000000000000000000000000000000000000 --- a/spaces/cozyanduofen/bingo/src/components/chat-attachments.tsx +++ /dev/null @@ -1,37 +0,0 @@ -import Image from 'next/image' -import ClearIcon from '@/assets/images/clear.svg' -import RefreshIcon from '@/assets/images/refresh.svg' -import { FileItem } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' -import { useBing } from '@/lib/hooks/use-bing' - -type ChatAttachmentsProps = Pick, 'attachmentList' | 'setAttachmentList' | 'uploadImage'> - -export function ChatAttachments({ attachmentList = [], setAttachmentList, uploadImage }: ChatAttachmentsProps) { - return attachmentList.length ? ( -
          - {attachmentList.map(file => ( -
          - {file.status === 'loading' && ( -
          -
          -
          ) - } - {file.status !== 'error' && ( -
          - -
          ) - } - {file.status === 'error' && ( -
          - refresh uploadImage(file.url)} /> -
          - )} - -
          - ))} -
          - ) : null -} diff --git a/spaces/daddyjin/TalkingFaceGeneration/FONT/modules/model_gen.py b/spaces/daddyjin/TalkingFaceGeneration/FONT/modules/model_gen.py deleted file mode 100644 index eb946ba421b7071070907052b714f58517fb8263..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/FONT/modules/model_gen.py +++ /dev/null @@ -1,516 +0,0 @@ -from torch import nn -import torch -import torch.nn.functional as F -from modules.util import AntiAliasInterpolation2d, make_coordinate_grid -from torchvision import models -import numpy as np -from torch.autograd import grad - - -class Vgg19(torch.nn.Module): - """ - Vgg19 network for perceptual loss. See Sec 3.3. - """ - def __init__(self, requires_grad=False): - super(Vgg19, self).__init__() - vgg_pretrained_features = models.vgg19(pretrained=True).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - for x in range(2): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(2, 7): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(7, 12): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(12, 21): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(21, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - - self.mean = torch.nn.Parameter(data=torch.Tensor(np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))), - requires_grad=False) - self.std = torch.nn.Parameter(data=torch.Tensor(np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))), - requires_grad=False) - - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - X = (X - self.mean) / self.std - h_relu1 = self.slice1(X) - h_relu2 = self.slice2(h_relu1) - h_relu3 = self.slice3(h_relu2) - h_relu4 = self.slice4(h_relu3) - h_relu5 = self.slice5(h_relu4) - out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] - return out - - -class ImagePyramide(torch.nn.Module): - """ - Create image pyramide for computing pyramide perceptual loss. See Sec 3.3 - """ - def __init__(self, scales, num_channels): - super(ImagePyramide, self).__init__() - downs = {} - for scale in scales: - downs[str(scale).replace('.', '-')] = AntiAliasInterpolation2d(num_channels, scale) - self.downs = nn.ModuleDict(downs) - - def forward(self, x): - out_dict = {} - for scale, down_module in self.downs.items(): - out_dict['prediction_' + str(scale).replace('-', '.')] = down_module(x) - return out_dict - - -class Transform: - """ - Random tps transformation for equivariance constraints. 
See Sec 3.3 - """ - def __init__(self, bs, **kwargs): - noise = torch.normal(mean=0, std=kwargs['sigma_affine'] * torch.ones([bs, 2, 3])) - self.theta = noise + torch.eye(2, 3).view(1, 2, 3) - self.bs = bs - - if ('sigma_tps' in kwargs) and ('points_tps' in kwargs): - self.tps = True - self.control_points = make_coordinate_grid((kwargs['points_tps'], kwargs['points_tps']), type=noise.type()) - self.control_points = self.control_points.unsqueeze(0) - self.control_params = torch.normal(mean=0, - std=kwargs['sigma_tps'] * torch.ones([bs, 1, kwargs['points_tps'] ** 2])) - else: - self.tps = False - - def transform_frame(self, frame): - grid = make_coordinate_grid(frame.shape[2:], type=frame.type()).unsqueeze(0) #[1,256,256,2] - grid = grid.view(1, frame.shape[2] * frame.shape[3], 2) - grid = self.warp_coordinates(grid).view(self.bs, frame.shape[2], frame.shape[3], 2) - return F.grid_sample(frame, grid, padding_mode="reflection") - - def inverse_transform_frame(self, frame): - grid = make_coordinate_grid(frame.shape[2:], type=frame.type()).unsqueeze(0) #[1,256,256,2] - grid = grid.view(1, frame.shape[2] * frame.shape[3], 2) - grid = self.inverse_warp_coordinates(grid).view(self.bs, frame.shape[2], frame.shape[3], 2) - return F.grid_sample(frame, grid, padding_mode="reflection") - - def warp_coordinates(self, coordinates): - theta = self.theta.type(coordinates.type()) - theta = theta.unsqueeze(1) - transformed = torch.matmul(theta[:, :, :, :2], coordinates.unsqueeze(-1)) + theta[:, :, :, 2:] - transformed = transformed.squeeze(-1) - - if self.tps: - control_points = self.control_points.type(coordinates.type()) - control_params = self.control_params.type(coordinates.type()) - distances = coordinates.view(coordinates.shape[0], -1, 1, 2) - control_points.view(1, 1, -1, 2) - distances = torch.abs(distances).sum(-1) - - result = distances ** 2 - result = result * torch.log(distances + 1e-6) - result = result * control_params - result = result.sum(dim=2).view(self.bs, coordinates.shape[1], 1) - transformed = transformed + result - - return transformed - - def inverse_warp_coordinates(self, coordinates): - theta = self.theta.type(coordinates.type()) - theta = theta.unsqueeze(1) - a = torch.FloatTensor([[[[0,0,1]]]]).repeat([self.bs,1,1,1]).cuda() - c = torch.cat((theta,a),2) - d = c.inverse()[:,:,:2,:] - d = d.type(coordinates.type()) - transformed = torch.matmul(d[:, :, :, :2], coordinates.unsqueeze(-1)) + d[:, :, :, 2:] - transformed = transformed.squeeze(-1) - - if self.tps: - control_points = self.control_points.type(coordinates.type()) - control_params = self.control_params.type(coordinates.type()) - distances = coordinates.view(coordinates.shape[0], -1, 1, 2) - control_points.view(1, 1, -1, 2) - distances = torch.abs(distances).sum(-1) - - result = distances ** 2 - result = result * torch.log(distances + 1e-6) - result = result * control_params - result = result.sum(dim=2).view(self.bs, coordinates.shape[1], 1) - transformed = transformed + result - - - return transformed - - def jacobian(self, coordinates): - coordinates.requires_grad=True - new_coordinates = self.warp_coordinates(coordinates)#[4,10,2] - grad_x = grad(new_coordinates[..., 0].sum(), coordinates, create_graph=True) - grad_y = grad(new_coordinates[..., 1].sum(), coordinates, create_graph=True) - jacobian = torch.cat([grad_x[0].unsqueeze(-2), grad_y[0].unsqueeze(-2)], dim=-2) - return jacobian - - -def detach_kp(kp): - return {key: value.detach() for key, value in kp.items()} - -class TrainFullModel(torch.nn.Module): - """ - Merge 
all generator related updates into single model for better multi-gpu usage - """ - - def __init__(self, kp_extractor, emo_feature, kp_extractor_a, audio_feature, generator, discriminator, train_params, device_ids): - super(TrainFullModel, self).__init__() - self.kp_extractor = kp_extractor - self.kp_extractor_a = kp_extractor_a - # self.emo_detector = emo_detector - # self.content_encoder = content_encoder - # self.emotion_encoder = emotion_encoder - self.audio_feature = audio_feature - self.emo_feature = emo_feature - self.generator = generator - self.discriminator = discriminator - self.train_params = train_params - self.scales = train_params['scales'] - self.disc_scales = self.discriminator.scales - self.pyramid = ImagePyramide(self.scales, generator.num_channels) - if torch.cuda.is_available(): - self.pyramid = self.pyramid.cuda() - - self.loss_weights = train_params['loss_weights'] - - if sum(self.loss_weights['perceptual']) != 0: - self.vgg = Vgg19() - if torch.cuda.is_available(): - self.vgg = self.vgg.cuda() - - # self.pca = torch.FloatTensor(np.load('/mnt/lustre/jixinya/Home/LRW/list/U_106.npy'))[:, :16].to(device_ids[0]) - # self.mean = torch.FloatTensor(np.load('/mnt/lustre/jixinya/Home/LRW/list/mean_106.npy')).to(device_ids[0]) - self.mse_loss_fn = nn.MSELoss().cuda() - self.CroEn_loss = nn.CrossEntropyLoss().cuda() - def forward(self, x): - # source_a_f = self.audio_feature(x['source_audio'],x['source_lm'],x[]) - # source_a_f = self.audio_feature(self.content_encoder(x['source_audio'].unsqueeze(1)), self.emotion_encoder(x['source_audio'].unsqueeze(1))) - kp_source = self.kp_extractor(x['example_image']) - # print(x['name'],len(x['name'])) - kp_driving = [] - kp_emo = [] - for i in range(16): - kp_driving.append(self.kp_extractor(x['driving'][:,i])) - # kp_emo.append(self.emo_detector(x['driving'][:,i])) - # print('KP_driving ', file=open('/mnt/lustre/jixinya/Home/fomm_audio/log/LRW_test.txt', 'a')) - kp_driving_a = [] #x['example_image'], - deco_out = self.audio_feature(x['example_image'], x['driving_audio'], x['driving_pose'], self.train_params['jaco_net']) - # emo_out = self.emo_feature(x['example_image'], x['driving_audio'], x['driving_pose'], self.train_params['jaco_net']) - loss_values = {} - - if self.loss_weights['emo'] != 0: - - kp_driving_a = [] - fakes = [] - for i in range(16): - kp_driving_a.append(self.kp_extractor_a(deco_out[:,i]))# - value = self.kp_extractor_a(deco_out[:,i])['value'] - jacobian = self.kp_extractor_a(deco_out[:,i])['jacobian'] - if self.train_params['type'] == 'linear_4' and x['name'][0] == 0: - out, fake = self.emo_feature(x['transformed_driving'][:,i],value,jacobian) - kp_emo.append(out) - fakes.append(fake) - # kp_emo.append(self.emo_feature(x['transformed_driving'][:,i],value,jacobian)) - elif self.train_params['type'] == 'linear_10' and x['name'][0] == 0: - # kp_emo.append(self.emo_feature.linear_10(x['transformed_driving'][:,i],value,jacobian)) - - out, fake = self.emo_feature.linear_10(x['transformed_driving'][:,i],value,jacobian) - kp_emo.append(out) - fakes.append(fake) - elif self.train_params['type'] == 'linear_4_new' and x['name'][0] == 0: - # kp_emo.append(self.emo_feature.linear_10(x['transformed_driving'][:,i],value,jacobian)) - - out, fake = self.emo_feature.linear_4(x['transformed_driving'][:,i],value,jacobian) - kp_emo.append(out) - fakes.append(fake) - elif self.train_params['type'] == 'linear_np_4': - # kp_emo.append(self.emo_feature.linear_10(x['transformed_driving'][:,i],value,jacobian)) - - out, fake = 
self.emo_feature.linear_np_4(x['transformed_driving'][:,i],value,jacobian) - kp_emo.append(out) - fakes.append(fake) - elif self.train_params['type'] == 'linear_np_10': - # kp_emo.append(self.emo_feature.linear_10(x['transformed_driving'][:,i],value,jacobian)) - - out, fake = self.emo_feature.linear_np_10(x['transformed_driving'][:,i],value,jacobian) - kp_emo.append(out) - fakes.append(fake) - # kp_emo.append(self.emo_feature(x['transformed_driving'][:,i],value,jacobian)) - # print('Kp_audio_driving ', file=open('/mnt/lustre/jixinya/Home/fomm_audio/log/LRW_test.txt', 'a')) - - loss_perceptual = 0 - - kp_all = kp_driving_a - if self.train_params['smooth'] == True: - value_all = torch.randn(len(kp_driving),out['value'].shape[0],out['value'].shape[1],out['value'].shape[2]).cuda() - jacobian_all = torch.randn(len(kp_driving),out['jacobian'].shape[0],out['jacobian'].shape[1],2,2).cuda() - print(len(kp_driving)) - for i in range(len(kp_driving)): - # if x['name'][i] == 'LRW': - # loss_jacobian += (torch.abs(kp_driving[i]['jacobian'] - kp_driving_a[i]['jacobian']).mean())*self.loss_weights['emo'] - - # loss_value += (torch.abs(kp_driving[i]['value'].detach() - kp_driving_a[i]['value']).mean())*self.loss_weights['emo'] - # loss_classify += self.mse_loss_fn(deco_out,deco_out) - if self.train_params['type'] == 'linear_4' and x['name'][0] == 0: - - kp_all[i]['jacobian'][:,1] = kp_emo[i]['jacobian'][:,0] + kp_driving_a[i]['jacobian'][:,1] - kp_all[i]['jacobian'][:,4] = kp_emo[i]['jacobian'][:,1] + kp_driving_a[i]['jacobian'][:,4] - kp_all[i]['jacobian'][:,6] = kp_emo[i]['jacobian'][:,2] + kp_driving_a[i]['jacobian'][:,6] - kp_all[i]['jacobian'][:,8] = kp_emo[i]['jacobian'][:,3] + kp_driving_a[i]['jacobian'][:,8] - kp_all[i]['value'][:,1] = kp_emo[i]['value'][:,0] + kp_driving_a[i]['value'][:,1] - kp_all[i]['value'][:,4] = kp_emo[i]['value'][:,1] + kp_driving_a[i]['value'][:,4] - kp_all[i]['value'][:,6] = kp_emo[i]['value'][:,2] + kp_driving_a[i]['value'][:,6] - kp_all[i]['value'][:,8] = kp_emo[i]['value'][:,3] + kp_driving_a[i]['value'][:,8] - - # kp_all[i]['value'] = kp_emo[i]['value'] + kp_driving_a[i]['value'] - - - if self.train_params['smooth'] == True: - loss_smooth = 0 - loss_smooth += (torch.abs(value_all[2:,:,:,:] + value_all[:-2,:,:,:].detach() -2*value_all[1:-1,:,:,:].detach()).mean())*self.loss_weights['emo'] *100 - loss_smooth += (torch.abs(jacobian_all[2:,:,:,:] + jacobian_all[:-2,:,:,:].detach() -2*jacobian_all[1:-1,:,:,:].detach()).mean())*self.loss_weights['emo'] *100 - loss_values['loss_smooth'] = loss_smooth/len(kp_driving) - else: - loss_values['loss_smooth'] = self.mse_loss_fn(deco_out,deco_out) - if self.train_params['generator'] == 'not': - loss_values['perceptual'] = self.mse_loss_fn(deco_out,deco_out) - for i in range(1): #0,len(kp_driving),4 - - generated = self.generator(x['example_image'], kp_source=kp_source, kp_driving=kp_all[i]) - generated.update({'kp_source': kp_source, 'kp_driving': kp_all}) - elif self.train_params['generator'] == 'visual': - for i in range(0,len(kp_driving),4): #0,len(kp_driving),4 - - generated = self.generator(x['example_image'], kp_source=kp_source, kp_driving=kp_driving[i]) - generated.update({'kp_source': kp_source, 'kp_driving': kp_driving}) - - pyramide_real = self.pyramid(x['driving'][:,i]) - pyramide_generated = self.pyramid(generated['prediction']) - - if sum(self.loss_weights['perceptual']) != 0: - value_total = 0 - for scale in self.scales: - x_vgg = self.vgg(pyramide_generated['prediction_' + str(scale)]) - y_vgg = 
self.vgg(pyramide_real['prediction_' + str(scale)]) - - for i, weight in enumerate(self.loss_weights['perceptual']): - value = torch.abs(x_vgg[i] - y_vgg[i].detach()).mean() - value_total += self.loss_weights['perceptual'][i] * value - loss_perceptual += value_total - - length = int((len(kp_driving)-1)/4)+1 - loss_values['perceptual'] = loss_perceptual/length - elif self.train_params['generator'] == 'audio': - for i in range(0,len(kp_driving),4): #0,len(kp_driving),4 - - generated = self.generator(x['example_image'], kp_source=kp_source, kp_driving=kp_all[i]) - generated.update({'kp_source': kp_source, 'kp_driving': kp_all}) - - pyramide_real = self.pyramid(x['driving'][:,i]) - pyramide_generated = self.pyramid(generated['prediction']) - # loss_mse = nn.MSELoss(generated['prediction'],x['driving'][:,i]) - if sum(self.loss_weights['perceptual']) != 0: - value_total = 0 - for scale in self.scales: - x_vgg = self.vgg(pyramide_generated['prediction_' + str(scale)]) - y_vgg = self.vgg(pyramide_real['prediction_' + str(scale)]) - - for i, weight in enumerate(self.loss_weights['perceptual']): - value = torch.abs(x_vgg[i] - y_vgg[i].detach()).mean() - value_total += self.loss_weights['perceptual'][i] * value - loss_perceptual += value_total - - length = int((len(kp_driving)-1)/4)+1 - loss_values['perceptual'] = loss_perceptual/length - # loss_values['mse'] = loss_mse/length - - else: - print('wrong train_params: ', self.train_params['generator']) - - - - return loss_values,generated - -class GeneratorFullModel(torch.nn.Module): - """ - Merge all generator related updates into single model for better multi-gpu usage - """ - - def __init__(self, kp_extractor, kp_extractor_a, audio_feature, generator, discriminator, train_params): - super(GeneratorFullModel, self).__init__() - self.kp_extractor = kp_extractor - self.kp_extractor_a = kp_extractor_a - # self.content_encoder = content_encoder - # self.emotion_encoder = emotion_encoder - self.audio_feature = audio_feature - self.generator = generator - self.discriminator = discriminator - self.train_params = train_params - self.scales = train_params['scales'] - self.disc_scales = self.discriminator.scales - self.pyramid = ImagePyramide(self.scales, generator.num_channels) - if torch.cuda.is_available(): - self.pyramid = self.pyramid.cuda() - - self.loss_weights = train_params['loss_weights'] - - if sum(self.loss_weights['perceptual']) != 0: - self.vgg = Vgg19() - if torch.cuda.is_available(): - self.vgg = self.vgg.cuda() - - self.pca = torch.FloatTensor(np.load('.../LRW/list/U_106.npy'))[:, :16].cuda() - self.mean = torch.FloatTensor(np.load('.../LRW/list/mean_106.npy')).cuda() - - def forward(self, x): - # source_a_f = self.audio_feature(x['source_audio'],x['source_lm'],x[]) - # source_a_f = self.audio_feature(self.content_encoder(x['source_audio'].unsqueeze(1)), self.emotion_encoder(x['source_audio'].unsqueeze(1))) - # kp_source = self.kp_extractor(x['source']) - # kp_source_a = self.kp_extractor_a(x['source'], x['source_cube'], source_a_f) - # driving_a_f = self.audio_feature(self.content_encoder(x['driving_audio'].unsqueeze(1)), self.emotion_encoder(x['driving_audio'].unsqueeze(1))) - # driving_a_f = self.audio_feature(x['driving_audio']) - # kp_driving = self.kp_extractor(x['driving']) - # kp_driving_a = self.kp_extractor_a(x['driving'], x['driving_cube'], driving_a_f) - - kp_driving = [] - for i in range(16): - kp_driving.append(self.kp_extractor(x['driving'][:,i],x['driving_landmark'][:,i],self.loss_weights['equivariance_value'])) - - kp_driving_a 
= [] - fc_out, deco_out = self.audio_feature(x['example_landmark'], x['driving_audio'], x['driving_pose']) - fake_lmark=fc_out + x['example_landmark'].expand_as(fc_out) - - - fake_lmark = torch.mm( fake_lmark, self.pca.t() ) - fake_lmark = fake_lmark + self.mean.expand_as(fake_lmark) - - - fake_lmark = fake_lmark.unsqueeze(0) - - # for i in range(16): - # kp_driving_a.append() - - # generated = self.generator(x['source'], kp_source=kp_source, kp_driving=kp_driving) - # generated.update({'kp_source': kp_source, 'kp_driving': kp_driving}) - - loss_values = {} - - pyramide_real = self.pyramid(x['driving']) - pyramide_generated = self.pyramid(generated['prediction']) - - if self.loss_weights['audio'] != 0: - value = torch.abs(kp_source['jacobian'].detach() - kp_source_a['jacobian'].detach()).mean() + torch.abs(kp_driving['jacobian'].detach() - kp_driving_a['jacobian']).mean() - value = value/2 - loss_values['jacobian'] = value*self.loss_weights['audio'] - value = torch.abs(kp_source['heatmap'].detach() - kp_source_a['heatmap'].detach()).mean() + torch.abs(kp_driving['heatmap'].detach() - kp_driving_a['heatmap']).mean() - value = value/2 - loss_values['heatmap'] = value*self.loss_weights['audio'] - value = torch.abs(kp_source['value'].detach() - kp_source_a['value'].detach()).mean() + torch.abs(kp_driving['value'].detach() - kp_driving_a['value']).mean() - value = value/2 - loss_values['value'] = value*self.loss_weights['audio'] - - if sum(self.loss_weights['perceptual']) != 0: - value_total = 0 - for scale in self.scales: - x_vgg = self.vgg(pyramide_generated['prediction_' + str(scale)]) - y_vgg = self.vgg(pyramide_real['prediction_' + str(scale)]) - - for i, weight in enumerate(self.loss_weights['perceptual']): - value = torch.abs(x_vgg[i] - y_vgg[i].detach()).mean() - value_total += self.loss_weights['perceptual'][i] * value - loss_values['perceptual'] = value_total - - if self.loss_weights['generator_gan'] != 0: - discriminator_maps_generated = self.discriminator(pyramide_generated, kp=detach_kp(kp_driving)) - discriminator_maps_real = self.discriminator(pyramide_real, kp=detach_kp(kp_driving)) - value_total = 0 - for scale in self.disc_scales: - key = 'prediction_map_%s' % scale - value = ((1 - discriminator_maps_generated[key]) ** 2).mean() - value_total += self.loss_weights['generator_gan'] * value - loss_values['gen_gan'] = value_total - - if sum(self.loss_weights['feature_matching']) != 0: - value_total = 0 - for scale in self.disc_scales: - key = 'feature_maps_%s' % scale - for i, (a, b) in enumerate(zip(discriminator_maps_real[key], discriminator_maps_generated[key])): - if self.loss_weights['feature_matching'][i] == 0: - continue - value = torch.abs(a - b).mean() - value_total += self.loss_weights['feature_matching'][i] * value - loss_values['feature_matching'] = value_total - - if (self.loss_weights['equivariance_value'] + self.loss_weights['equivariance_jacobian']) != 0: - transform = Transform(x['driving'].shape[0], **self.train_params['transform_params']) - transformed_frame = transform.transform_frame(x['driving']) - transformed_landmark = transform.inverse_warp_coordinates(x['driving_landmark']) - transformed_kp = self.kp_extractor(transformed_frame) - - generated['transformed_frame'] = transformed_frame - generated['transformed_kp'] = transformed_kp - - ## Value loss part - if self.loss_weights['equivariance_value'] != 0: - value = torch.abs(kp_driving['value'] - transform.warp_coordinates(transformed_kp['value'])).mean() - loss_values['equivariance_value'] = 
self.loss_weights['equivariance_value'] * value - - ## jacobian loss part - if self.loss_weights['equivariance_jacobian'] != 0: - jacobian_transformed = torch.matmul(transform.jacobian(transformed_kp['value']), - transformed_kp['jacobian']) - - normed_driving = torch.inverse(kp_driving['jacobian']) - normed_transformed = jacobian_transformed - value = torch.matmul(normed_driving, normed_transformed) - - eye = torch.eye(2).view(1, 1, 2, 2).type(value.type()) - - value = torch.abs(eye - value).mean() - loss_values['equivariance_jacobian'] = self.loss_weights['equivariance_jacobian'] * value - - return loss_values, generated - - -class DiscriminatorFullModel(torch.nn.Module): - """ - Merge all discriminator related updates into single model for better multi-gpu usage - """ - - def __init__(self, kp_extractor, generator, discriminator, train_params): - super(DiscriminatorFullModel, self).__init__() - self.kp_extractor = kp_extractor - self.generator = generator - self.discriminator = discriminator - self.train_params = train_params - self.scales = self.discriminator.scales - self.pyramid = ImagePyramide(self.scales, generator.num_channels) - if torch.cuda.is_available(): - self.pyramid = self.pyramid.cuda() - - self.loss_weights = train_params['loss_weights'] - - def forward(self, x, generated): - pyramide_real = self.pyramid(x['driving']) - pyramide_generated = self.pyramid(generated['prediction'].detach()) - - kp_driving = generated['kp_driving'] - discriminator_maps_generated = self.discriminator(pyramide_generated, kp=detach_kp(kp_driving)) - discriminator_maps_real = self.discriminator(pyramide_real, kp=detach_kp(kp_driving)) - - loss_values = {} - value_total = 0 - for scale in self.scales: - key = 'prediction_map_%s' % scale - value = (1 - discriminator_maps_real[key]) ** 2 + discriminator_maps_generated[key] ** 2 - value_total += self.loss_weights['discriminator_gan'] * value.mean() - loss_values['disc_gan'] = value_total - - return loss_values diff --git a/spaces/danupurnomo/fifa-2022-rating-prediction/app.py b/spaces/danupurnomo/fifa-2022-rating-prediction/app.py deleted file mode 100644 index a0c370ffe1008a1a3448672f344e12dfa6a98c54..0000000000000000000000000000000000000000 --- a/spaces/danupurnomo/fifa-2022-rating-prediction/app.py +++ /dev/null @@ -1,270 +0,0 @@ -import os -import time -import base64 -import pickle -import numpy as np -import pandas as pd -import streamlit as st -import plotly.express as px -from PIL import Image -from collections import deque -from urllib.request import urlopen - - -# STEP 1 - DEFINE PATHS -base_path = os.path.abspath(os.path.dirname(__file__)) -model_path = os.path.join(base_path, 'model') -img_path = os.path.join(base_path, 'img') - -# STEP 2 - LOAD MODEL -model_filename = 'model_rating.pkl' -scaler_filename = 'model_feat_scaling.pkl' -encoder_filename = 'model_feat_enc.pkl' - -model_filepath = os.path.join(model_path, model_filename) -scaler_filepath = os.path.join(model_path, scaler_filename) -encoder_filepath = os.path.join(model_path, encoder_filename) - -with open(model_filepath, "rb") as filename: - model_rating = pickle.load(filename) - -with open(scaler_filepath, "rb") as filename: - scaler = pickle.load(filename) - -with open(encoder_filepath, "rb") as filename: - encoder = pickle.load(filename) - -# STEP 3 - SET PAGE CONFIG -st.set_page_config( - page_title = 'FIFA 2022 Player Rating\'s Prediction', - layout = 'wide', - initial_sidebar_state = 'auto', - menu_items = { - 'About': ''' - ## FIFA 2022 Player Rating\'s Prediction - --- - 
_Made by Danu Purnomo_ - - Predict rating of a football player based on FIFA 2022 players. - ''' - } -) - -# STEP 4 - CREATE BACKGROUND -def convert_img_to_base64(img_path): - with open(img_path, 'rb') as image_file: - encoded_string = base64.b64encode(image_file.read()) - return encoded_string - -img_background_path = os.path.join(img_path, '01 - background.jpg') -# img_background_path = os.path.join(img_path, 'test-02.jpg') -encoded_string = convert_img_to_base64(img_background_path) -st.markdown( - f""" - - """, - unsafe_allow_html=True -) - -# STEP 5 - SET TITLE AND OPENER -## STEP 5.1 - SET TITLE -text_title = '

          FIFA 2022 Player\'s Rating Predictions

          ' -st.markdown(text_title, unsafe_allow_html=True) - -## STEP 5.2 - SET OPENER -gif0 = '
          ' -st.markdown(gif0, unsafe_allow_html=True) - -# STEP 6 - SET PARAMETERS -st.markdown('---') -text_style = '

          Set Parameters

          ' -st.markdown(text_style, unsafe_allow_html=True) - -# Attribute of a football player -# 0 Name 19260 non-null object -# 1 Age 19260 non-null int64 -# 2 Height 19260 non-null int64 -# 3 Weight 19260 non-null int64 -# 4 Price 19260 non-null int64 -# 5 AttackingWorkRate 19260 non-null object -# 6 DefensiveWorkRate 19260 non-null object -# 7 PaceTotal 19260 non-null int64 -# 8 ShootingTotal 19260 non-null int64 -# 9 PassingTotal 19260 non-null int64 -# 10 DribblingTotal 19260 non-null int64 -# 11 DefendingTotal 19260 non-null int64 -# 12 PhysicalityTotal 19260 non-null int64 -# 13 Rating 19260 non-null int64 - -with st.form(key='form_parameters'): - - ## STEP 6.1 : Section 1 - header_section_1 = '

          Personal Profile

          ' - st.markdown(header_section_1, unsafe_allow_html=True) - - col1, col2, col3 = st.columns([1, 1, 1]) - st.markdown(f'

          ', unsafe_allow_html=True) - with col1: - img_personal_profile_path = os.path.join(img_path, '02 - personal profile.png') - image = Image.open(img_personal_profile_path) - st.image(image, width=350) - - with col2: - col_name = st.text_input('Name', value='', help='Player\'s name') - col_age = st.number_input('Age', min_value=14, max_value=60, value=22, step=1, help='Player\'s age. Default age is 22.') - col_price = st.number_input('Price (EUR)', min_value=0, value=1000000, step=1, format='%d', help='Player\'s price. Default price is EUR 1,000,000.') - - with col3: - col_height = st.number_input('Height (cm)', min_value=140, max_value=220, value=180, step=1, help='Player\'s height. Default height is 180 cm.') - col_weight = st.number_input('Weight (kg)', min_value=40, max_value=120, value=70, step=1, help='Player\'s weight. Default weight is 70 kg.') - - ## STEP 6.2 : Section 2 - header_section_2 = '

          Work Rate

          ' - st.markdown('---') - st.markdown(header_section_2, unsafe_allow_html=True) - - col1, col2, col3 = st.columns([1, 1, 1]) - with col1: - img_work_rate_path = os.path.join(img_path, '03 - work rate.png') - image = Image.open(img_work_rate_path) - st.image(image, width=250) - - with col2: - col_attacking_work_rate = st.selectbox('Attacking Work Rate', ['-', 'Low', 'Medium', 'High'], index=0, help='Player\'s desire to attack.') - col_defensive_work_rate = st.selectbox('Defensive Work Rate', ['-', 'Low', 'Medium', 'High'], index=0, help='Player\'s desire to defend.') - - ## STEP 6.3 : Section 3 - header_section_3 = '

          Ability

          ' - st.markdown('---') - st.markdown(header_section_3, unsafe_allow_html=True) - - col1, col2, col3 = st.columns([1, 1, 1]) - with col1: - img_work_rate_path = os.path.join(img_path, '04 - ability.png') - image = Image.open(img_work_rate_path) - st.image(image, width=350) - - with col2: - col_pace_total = st.number_input('Pace Total', min_value=0, max_value=100, value=50, step=1, help='How fast is a player.') - col_shooting_total = st.number_input('Shooting Total', min_value=0, max_value=100, value=50, step=1, help='How good at kicking.') - col_passing_total = st.number_input('Passing Total', min_value=0, max_value=100, value=50, step=1, help='How good at passing.') - - with col3: - col_dribbling_total = st.number_input('Dribbling Total', min_value=0, max_value=100, value=50, step=1, help='How good at dribbling.') - col_defending_total = st.number_input('Defending Total', min_value=0, max_value=100, value=50, step=1, help='How good at defending.') - col_physicality_total = st.number_input('Physicality Total', min_value=0, max_value=100, value=50, step=1, help='How good is a player\'s physique.') - - ## STEP 6.4 : Section 4 - st.markdown('

          ', unsafe_allow_html=True) - col1, col2, col3 = st.columns([3, 1, 3]) - with col2: - submitted = st.form_submit_button('Predict') - -# STEP 7 - PREDICT NEW DATA -## STEP 7.1 - Create DataFrame for New Data -## `new_data` is for inference meanwhile `new_data_for_radar_plot` is for plot line_polar. - -new_data = { - 'Name': [col_name], - 'Age': [col_age], - 'Height': [col_height], - 'Weight': [col_weight], - 'Price': [col_price], - 'AttackingWorkRate': [col_attacking_work_rate], - 'DefensiveWorkRate': [col_defensive_work_rate], - 'PaceTotal': [col_pace_total], - 'ShootingTotal': [col_shooting_total], - 'PassingTotal': [col_passing_total], - 'DribblingTotal': [col_dribbling_total], - 'DefendingTotal': [col_defending_total], - 'PhysicalityTotal': [col_physicality_total] -} - -new_data_for_radar_plot = { - 'PaceTotal': [col_pace_total], - 'ShootingTotal': [col_shooting_total], - 'PassingTotal': [col_passing_total], - 'DribblingTotal': [col_dribbling_total], - 'DefendingTotal': [col_defending_total], - 'PhysicalityTotal': [col_physicality_total] -} - -new_data = pd.DataFrame.from_dict(new_data) -new_data_for_radar_plot = pd.DataFrame.from_dict(new_data_for_radar_plot) -# st.write(new_data) -print('New Data : ', new_data) - -result_section = st.empty() -if submitted : - ## STEP 7.2 - Split Numerical Columns and Categorical Columns - num_columns = ['Age', 'Height', 'Weight', 'Price', 'PaceTotal', 'ShootingTotal', 'PassingTotal', 'DribblingTotal', 'DefendingTotal', 'PhysicalityTotal'] - cat_columns = ['AttackingWorkRate', 'DefensiveWorkRate'] - - new_data_num = new_data[num_columns] - new_data_cat = new_data[cat_columns] - - ## STEP 7.3 - Feature Scaling and Feature Encoding - new_data_num_scaled = scaler.transform(new_data_num) - new_data_cat_encoded = encoder.transform(new_data_cat) - - ## STEP 7.4 - Concatenate between Numerical Columns and Categorical Columns - new_data_final = np.concatenate([new_data_num_scaled, new_data_cat_encoded], axis=1) - - ## STEP 7.5 - Predict using Linear Regression - y_pred_inf = model_rating.predict(new_data_final) - print(type(y_pred_inf)) - - ## STEP 7.6 - Display Prediction - result_section.empty() - bar = st.progress(0) - for i in range(100): - bar.progress(i + 1) - time.sleep(0.01) - bar.empty() - - with result_section.container(): - st.markdown('

          ', unsafe_allow_html=True) - - col1, col2, col3, col4, col5 = st.columns([0.5, 2, 1, 2, 1]) - with col2: - img_personal_profile_path = os.path.join(img_path, '05 - person rear.png') - image = Image.open(img_personal_profile_path) - st.image(image, width=350) - - with col3: - st.markdown('



          ', unsafe_allow_html=True) - player_name = '

          ' + col_name + '

          ' - st.markdown(player_name, unsafe_allow_html=True) - - player_rating_pred = '

          ' + str(int(y_pred_inf)) + '

          ' - st.markdown(player_rating_pred, unsafe_allow_html=True) - - with col4: - st.markdown('

          ', unsafe_allow_html=True) - skill_total_fig = px.line_polar( - r = new_data_for_radar_plot.loc[0].values, - theta = new_data_for_radar_plot.columns, - line_close = True, - range_r = [0, 100], - color_discrete_sequence = ['#FFF9AC'], - # hover_name=['PaceTotal', '1', '2', '4', '5', '6'], - template='plotly_dark') - skill_total_fig.update_traces(fill='toself') - skill_total_fig.update_layout({ - 'plot_bgcolor': 'rgba(255, 0, 0, 0)', - 'paper_bgcolor': 'rgba(0, 0, 0, 0)', - 'font_size': 19 - }) - st.write(skill_total_fig) - -st.write('''Source images : -[link](https://www.vecteezy.com/vector-art/5129950-football-player-figure-line-art-human-action-on-motion-lines-controlling-the-ball-with-chest), -[link](https://www.vecteezy.com/vector-art/5939693-football-player-figure-line-art-human-action-on-motion-lines-kicking-ball), -[link](https://www.vecteezy.com/vector-art/5129956-football-player-figure-line-art-human-action-on-motion-lines-kicking-ball), -[link](https://www.dreamstime.com/young-african-soccer-player-man-studio-isolated-white-background-silhouette-shadow-young-african-soccer-player-man-image199265151) -''') \ No newline at end of file diff --git a/spaces/datasciencedojo/AudioTranscription/app.py b/spaces/datasciencedojo/AudioTranscription/app.py deleted file mode 100644 index 80f181b463e9d51515b36ae001b61ec8ab444199..0000000000000000000000000000000000000000 --- a/spaces/datasciencedojo/AudioTranscription/app.py +++ /dev/null @@ -1,93 +0,0 @@ -import torch - -import gradio as gr -import pytube as pt -from transformers import pipeline -from huggingface_hub import model_info - -MODEL_NAME = "openai/whisper-small" #this always needs to stay in line 8 :D sorry for the hackiness -lang = "en" - -device = 0 if torch.cuda.is_available() else "cpu" -pipe = pipeline( - task="automatic-speech-recognition", - model=MODEL_NAME, - chunk_length_s=30, - device=device, -) - -pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe") - -def transcribe(microphone, file_upload): - warn_output = "" - if (microphone is not None) and (file_upload is not None): - warn_output = ( - "WARNING: You've uploaded an audio file and used the microphone. 
" - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - ) - - elif (microphone is None) and (file_upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - file = microphone if microphone is not None else file_upload - - text = pipe(file)["text"] - - return warn_output + text - -demo = gr.Blocks() - -css = """ -footer {display:none !important} -.output-markdown{display:none !important} -button.primary { - z-index: 14; - left: 0px; - top: 0px; - cursor: pointer !important; - background: none rgb(17, 20, 45) !important; - border: none !important; - color: rgb(255, 255, 255) !important; - line-height: 1 !important; - border-radius: 6px !important; - transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important; - box-shadow: none !important; -} -button.primary:hover{ - z-index: 14; - left: 0px; - top: 0px; - cursor: pointer !important; - background: none rgb(37, 56, 133) !important; - border: none !important; - color: rgb(255, 255, 255) !important; - line-height: 1 !important; - border-radius: 6px !important; - transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important; - box-shadow: rgb(0 0 0 / 23%) 0px 1px 7px 0px !important; -} -button.gallery-item:hover { - border-color: rgb(37 56 133) !important; - background-color: rgb(229,225,255) !important; -} -""" - -examples = [ - ['Martin Luther king - FREE AT LAST.mp3'], ['Winston Churchul - ARCH OF VICTOR.mp3'], ['Voice of Neil Armstrong.mp3'], ['Speeh by George Washington.mp3'], ['Speech by John Kennedy.mp3'], ['Al Gore on Inventing the Internet.mp3'], ['Alan Greenspan.mp3'], ['Neil Armstrong - ONE SMALL STEP.mp3'], ['General Eisenhower announcing D-Day landing.mp3'], ['Hey Siri.wav'] -] - -mf_transcribe = gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type="filepath", optional=True), - gr.inputs.Audio(source="upload", type="filepath", optional=True) - ], - outputs="text", - layout="horizontal", - theme="huggingface", - allow_flagging="never", - examples = examples, - css = css -).launch(enable_queue=True) - -#used openai/whisper model \ No newline at end of file diff --git a/spaces/dawood17/SayBot_Enchancer/README.md b/spaces/dawood17/SayBot_Enchancer/README.md deleted file mode 100644 index 6fafbe6f03ca8588a58a159d4ab39fe2256c9d88..0000000000000000000000000000000000000000 --- a/spaces/dawood17/SayBot_Enchancer/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: CodeFormer -emoji: 🐼 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: sczhou/CodeFormer ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/filelock/_error.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/filelock/_error.py deleted file mode 100644 index f7ff08c0f508ad7077eb6ed1990898840c952b3a..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/filelock/_error.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import annotations - -from typing import Any - - -class Timeout(TimeoutError): # noqa: N818 - """Raised when the lock could not be acquired in *timeout* seconds.""" - - def __init__(self, lock_file: str) -> None: - super().__init__() - self._lock_file = lock_file - - def __reduce__(self) -> str | 
tuple[Any, ...]: - return self.__class__, (self._lock_file,) # Properly pickle the exception - - def __str__(self) -> str: - return f"The file lock '{self._lock_file}' could not be acquired." - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.lock_file!r})" - - @property - def lock_file(self) -> str: - """:return: The path of the file lock.""" - return self._lock_file - - -__all__ = [ - "Timeout", -] diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/deprecation.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/deprecation.py deleted file mode 100644 index d14f88ffcda40a78a8072c48c378f95b874ff8fa..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/deprecation.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import annotations - -import warnings - -from gradio import utils - - -class GradioDeprecationWarning(UserWarning): - # This does not subclass DeprecationWarning - # because we want to show the warning by default. - pass - - -class GradioUnusedKwargWarning(UserWarning): - pass - - -def simple_deprecated_notice(term: str) -> str: - return f"`{term}` parameter is deprecated, and it has no effect" - - -def use_in_launch(term: str) -> str: - return f"`{term}` is deprecated in `Interface()`, please use it within `launch()` instead." - - -DEPRECATION_MESSAGE = { - "optional": simple_deprecated_notice("optional"), - "keep_filename": simple_deprecated_notice("keep_filename"), - "numeric": simple_deprecated_notice("numeric"), - "verbose": simple_deprecated_notice("verbose"), - "allow_screenshot": simple_deprecated_notice("allow_screenshot"), - "layout": simple_deprecated_notice("layout"), - "show_input": simple_deprecated_notice("show_input"), - "show_output": simple_deprecated_notice("show_output"), - "capture_session": simple_deprecated_notice("capture_session"), - "api_mode": simple_deprecated_notice("api_mode"), - "show_tips": use_in_launch("show_tips"), - "encrypt": simple_deprecated_notice("encrypt"), - "enable_queue": use_in_launch("enable_queue"), - "server_name": use_in_launch("server_name"), - "server_port": use_in_launch("server_port"), - "width": use_in_launch("width"), - "height": use_in_launch("height"), - "plot": "The 'plot' parameter has been deprecated. Use the new Plot component instead", -} - - -def check_deprecated_parameters( - cls: str, *, stacklevel: int | None = None, kwargs -) -> None: - if stacklevel is None: - stacklevel = utils.find_user_stack_level() - - for key, value in DEPRECATION_MESSAGE.items(): - if key in kwargs: - if key == "plot" and cls != "Image": - continue - kwargs.pop(key) - warnings.warn(value, GradioDeprecationWarning, stacklevel=stacklevel) - - if kwargs: - warnings.warn( - f"You have unused kwarg parameters in {cls}, please remove them: {kwargs}", - GradioUnusedKwargWarning, - stacklevel=stacklevel, - ) - - -def warn_deprecation(text: str) -> None: - warnings.warn( - text, - GradioDeprecationWarning, - stacklevel=utils.find_user_stack_level(), - ) - - -def warn_style_method_deprecation() -> None: - warn_deprecation( - "The `style` method is deprecated. Please set these arguments in the constructor instead." 
- ) diff --git a/spaces/declare-lab/tango/diffusers/tests/pipelines/audio_diffusion/__init__.py b/spaces/declare-lab/tango/diffusers/tests/pipelines/audio_diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/diacanFperku/AutoGPT/Black Bird Cleaner 1.0.3.6 Activator - Crackingpatching .rar.md b/spaces/diacanFperku/AutoGPT/Black Bird Cleaner 1.0.3.6 Activator - Crackingpatching .rar.md deleted file mode 100644 index 1d3542b3e8e9d7385aedd905f8759ab068b1d09b..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Black Bird Cleaner 1.0.3.6 Activator - Crackingpatching .rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Black Bird Cleaner 1.0.3.6 Activator - Crackingpatching .rar


          DOWNLOAD 🆓 https://gohhs.com/2uFU3X



          - -Bubble Tanks Tower Defense 1.5 [By - Enzup].rar DRM Free. Bubble Tanks ... Black Bird Cleaner 1.0.3.6 + Activator - Crackingpatching. Black Bird Cleaner ... 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/diacanFperku/AutoGPT/HD Online Player (Slumdog Millionaire Full Movie In English Download Free) _BEST_.md b/spaces/diacanFperku/AutoGPT/HD Online Player (Slumdog Millionaire Full Movie In English Download Free) _BEST_.md deleted file mode 100644 index ce00227f06adca52e67abb93bb94aa5c2ccba290..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/HD Online Player (Slumdog Millionaire Full Movie In English Download Free) _BEST_.md +++ /dev/null @@ -1,6 +0,0 @@ -

          HD Online Player (Slumdog Millionaire full movie in english download free)


          Download ->->->-> https://gohhs.com/2uFVMU



          - -Movie times, buy movie tickets online, watch trailers and get directions to ... 16 Player first. Full selection, including hard to find items, merchandise, accessories and ... Anime flix, animeultima, 4anime to watch English dubbed anime for free. ... Tiger: A rags-to-riches drama setting itself up as the new Slumdog Millionaire. 1fdad05405
          -
          -
          -

          diff --git a/spaces/diacanFperku/AutoGPT/Nagios Xi License Key [NEW].md b/spaces/diacanFperku/AutoGPT/Nagios Xi License Key [NEW].md deleted file mode 100644 index ccea71cdd25e21269796ace70e7b608b08ddf203..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Nagios Xi License Key [NEW].md +++ /dev/null @@ -1,7 +0,0 @@ - -

Nagios XI monitors the services in your environment using a combination of plugins and the APIs for each service. The plugins form a defined set: some are provided by Nagios XI itself, such as the host-based service plugins, while others are written by application and service developers and uploaded to Nagios XI, where they can be monitored as Nagios XI services. Each service can have a different set of plugins, but every service includes the host-based service, the service-specific plugins, and the host-based plugin used to monitor that service's host.

          -

The vulnerability identified above allows a remote user to create the getprofile.sh script; when the nagios user runs the script, it removes the nagios user's permissions from the /etc/sudoers.d/nagios file. This means a malicious user can list the system profile and change the system state, which could lead to the loss of personal and/or financial data. It also means the nagios user can no longer access the list profile or any other configuration files that might exist on the system. If the system is connected to a printer, a new user ID can be created and the getprofile.sh script run to create a new backup file, which can then be uploaded to a remote location.

          -

          nagios xi license key


Download https://gohhs.com/2uFTXV



          -

Nagios XI has its own built-in SNMP client, and the provided SNMP workstation can be used to monitor the system's operating state through the SNMP agent. If the system is connected to a printer, SNMP can also be used to monitor the status of that device.

          899543212b
          -
          -
          \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Paragon APFS For Windows 2.1.12 Crack 37 MB.md b/spaces/diacanFperku/AutoGPT/Paragon APFS For Windows 2.1.12 Crack 37 MB.md deleted file mode 100644 index 1d75c756abe4fd6f0228ea7a229d0809f4bb697c..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Paragon APFS For Windows 2.1.12 Crack 37 MB.md +++ /dev/null @@ -1,22 +0,0 @@ -
          -

          How to Access APFS Volumes on Windows PCs with Paragon APFS for Windows 2.1.12

          -

          If you have an APFS-formatted drive that you want to use on a Windows PC, you might be wondering how to read and write files on it. APFS is a new file system introduced by Apple in 2016 for macOS, iOS, and other devices. It offers better performance, security, and reliability than the previous HFS+ file system, but it is not compatible with Windows by default.

          -

          Fortunately, there is a solution that can help you access APFS volumes on Windows PCs without any hassle. Paragon APFS for Windows is a software that enables you to mount, browse, and copy files from APFS-formatted drives on Windows 10, Windows Server 2019, Windows 11, and Windows Server 2022. It supports all types of APFS volumes, including encrypted ones and those with snapshots.

          -

          Paragon APFS for Windows 2.1.12 Crack | 37 MB


          DOWNLOAD 🔗 https://gohhs.com/2uFVmK



          -

          In this article, we will show you how to use Paragon APFS for Windows 2.1.12 to access your APFS drive on a Windows PC. This version of the software has a file size of 37 MB and supports APFS volumes created in macOS 12 Monterey.

          -

          Step 1: Download and Install Paragon APFS for Windows

          -

          To get started, you need to download and install Paragon APFS for Windows on your PC. You can get a free 10-day trial or buy a license for $49.95 from the official website[^4^]. The installation process is simple and straightforward. Just follow the instructions on the screen and accept the license agreement.

          -

          Step 2: Connect Your APFS Drive to Your PC

          -

          Next, you need to connect your APFS drive to your PC using a USB cable or an adapter. Make sure your drive is powered on and recognized by your PC. You can check the status of your drive in the Device Manager or Disk Management.

          -

          Step 3: Browse Your APFS Drive in Windows Explorer

          -

          Once your APFS drive is connected, Paragon APFS for Windows will automatically mount it as a read-write volume in Windows Explorer. You can find it under This PC or My Computer with a drive letter assigned by Windows. You can also see the volume name, file system type, and capacity of your drive.

          -

          Now you can browse your APFS drive as if it were a native Windows drive. You can open, copy, edit, and delete files and folders on your drive using any program or app on your PC. You can also create new files and folders on your drive as long as there is enough free space available.

          -

          Step 4: Unmount Your APFS Drive When You Are Done

          -

          When you are done using your APFS drive on your PC, you should unmount it properly before disconnecting it. To do this, right-click on the drive icon in Windows Explorer and select Eject. Alternatively, you can use the Safely Remove Hardware and Eject Media icon in the system tray.

          -

          Wait until you see a message that says it is safe to remove your drive. Then you can disconnect your drive from your PC and use it on another device.

          -

          Conclusion

          -

          Paragon APFS for Windows is a handy tool that lets you access APFS volumes on Windows PCs without any hassle. It supports all types of APFS volumes, including encrypted ones and those with snapshots. It also supports APFS volumes created in macOS 12 Monterey.

          -

          -

          If you have an APFS-formatted drive that you want to use on a Windows PC, you can download Paragon APFS for Windows 2.1.12 from the official website[^4^] and enjoy a free 10-day trial or buy a license for $49.95.

          d5da3c52bf
          -
          -
          \ No newline at end of file diff --git a/spaces/digitalxingtong/Nailv-read-Bert-Vits2/text/english_bert_mock.py b/spaces/digitalxingtong/Nailv-read-Bert-Vits2/text/english_bert_mock.py deleted file mode 100644 index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nailv-read-Bert-Vits2/text/english_bert_mock.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch - - -def get_bert_feature(norm_text, word2ph): - return torch.zeros(1024, sum(word2ph)) diff --git a/spaces/digitalxingtong/Xingtong-2dall-Bert-VITS2/text/chinese.py b/spaces/digitalxingtong/Xingtong-2dall-Bert-VITS2/text/chinese.py deleted file mode 100644 index 276753880b73de2e8889dcb2101cd98c09e0710b..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-2dall-Bert-VITS2/text/chinese.py +++ /dev/null @@ -1,193 +0,0 @@ -import os -import re - -import cn2an -from pypinyin import lazy_pinyin, Style - -from text import symbols -from text.symbols import punctuation -from text.tone_sandhi import ToneSandhi - -current_file_path = os.path.dirname(__file__) -pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in - open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()} - -import jieba.posseg as psg - - -rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - '$': '.', - '“': "'", - '”': "'", - '‘': "'", - '’': "'", - '(': "'", - ')': "'", - '(': "'", - ')': "'", - '《': "'", - '》': "'", - '【': "'", - '】': "'", - '[': "'", - ']': "'", - '—': "-", - '~': "-", - '~': "-", - '「': "'", - '」': "'", - -} - -tone_modifier = ToneSandhi() - -def replace_punctuation(text): - text = text.replace("嗯", "恩").replace("呣","母") - pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys())) - - replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) - - replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text) - - return replaced_text - -def g2p(text): - pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation)) - sentences = [i for i in re.split(pattern, text) if i.strip()!=''] - phones, tones, word2ph = _g2p(sentences) - assert sum(word2ph) == len(phones) - assert len(word2ph) == len(text) #Sometimes it will crash,you can add a try-catch. 
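    # Pad every sequence with a boundary symbol on both sides: '_' for the phone
    # list, tone 0, and a word2ph count of 1 per added symbol, so that
    # sum(word2ph) == len(phones) still holds after the padding.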
- phones = ['_'] + phones + ["_"] - tones = [0] + tones + [0] - word2ph = [1] + word2ph + [1] - return phones, tones, word2ph - - -def _get_initials_finals(word): - initials = [] - finals = [] - orig_initials = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.INITIALS) - orig_finals = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for c, v in zip(orig_initials, orig_finals): - initials.append(c) - finals.append(v) - return initials, finals - - -def _g2p(segments): - phones_list = [] - tones_list = [] - word2ph = [] - for seg in segments: - pinyins = [] - # Replace all English words in the sentence - seg = re.sub('[a-zA-Z]+', '', seg) - seg_cut = psg.lcut(seg) - initials = [] - finals = [] - seg_cut = tone_modifier.pre_merge_for_modify(seg_cut) - for word, pos in seg_cut: - if pos == 'eng': - continue - sub_initials, sub_finals = _get_initials_finals(word) - sub_finals = tone_modifier.modified_tone(word, pos, - sub_finals) - initials.append(sub_initials) - finals.append(sub_finals) - - # assert len(sub_initials) == len(sub_finals) == len(word) - initials = sum(initials, []) - finals = sum(finals, []) - # - for c, v in zip(initials, finals): - raw_pinyin = c+v - # NOTE: post process for pypinyin outputs - # we discriminate i, ii and iii - if c == v: - assert c in punctuation - phone = [c] - tone = '0' - word2ph.append(1) - else: - v_without_tone = v[:-1] - tone = v[-1] - - pinyin = c+v_without_tone - assert tone in '12345' - - if c: - # 多音节 - v_rep_map = { - "uei": 'ui', - 'iou': 'iu', - 'uen': 'un', - } - if v_without_tone in v_rep_map.keys(): - pinyin = c+v_rep_map[v_without_tone] - else: - # 单音节 - pinyin_rep_map = { - 'ing': 'ying', - 'i': 'yi', - 'in': 'yin', - 'u': 'wu', - } - if pinyin in pinyin_rep_map.keys(): - pinyin = pinyin_rep_map[pinyin] - else: - single_rep_map = { - 'v': 'yu', - 'e': 'e', - 'i': 'y', - 'u': 'w', - } - if pinyin[0] in single_rep_map.keys(): - pinyin = single_rep_map[pinyin[0]]+pinyin[1:] - - assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin) - phone = pinyin_to_symbol_map[pinyin].split(' ') - word2ph.append(len(phone)) - - phones_list += phone - tones_list += [int(tone)] * len(phone) - return phones_list, tones_list, word2ph - - - -def text_normalize(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - text = replace_punctuation(text) - return text - -def get_bert_feature(text, word2ph): - from text import chinese_bert - return chinese_bert.get_bert_feature(text, word2ph) - -if __name__ == '__main__': - from text.chinese_bert import get_bert_feature - text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏" - text = text_normalize(text) - print(text) - phones, tones, word2ph = g2p(text) - bert = get_bert_feature(text, word2ph) - - print(phones, tones, word2ph, bert.shape) - - -# # 示例用法 -# text = "这是一个示例文本:,你好!这是一个测试...." 
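# (g2p_paddle is not defined in this module; the working entry points shown above are text_normalize() and g2p().)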
-# print(g2p_paddle(text)) # 输出: 这是一个示例文本你好这是一个测试 diff --git a/spaces/dirge/voicevox/voicevox_engine/model.py b/spaces/dirge/voicevox/voicevox_engine/model.py deleted file mode 100644 index fa5c23e26b5571a33b913cb12c9abfe3dcf34135..0000000000000000000000000000000000000000 --- a/spaces/dirge/voicevox/voicevox_engine/model.py +++ /dev/null @@ -1,282 +0,0 @@ -from enum import Enum -from re import findall, fullmatch -from typing import Dict, List, Optional - -from pydantic import BaseModel, Field, conint, validator - -from .metas.Metas import Speaker, SpeakerInfo - - -class Mora(BaseModel): - """ - モーラ(子音+母音)ごとの情報 - """ - - text: str = Field(title="文字") - consonant: Optional[str] = Field(title="子音の音素") - consonant_length: Optional[float] = Field(title="子音の音長") - vowel: str = Field(title="母音の音素") - vowel_length: float = Field(title="母音の音長") - pitch: float = Field(title="音高") # デフォルト値をつけるとts側のOpenAPIで生成されたコードの型がOptionalになる - - def __hash__(self): - items = [ - (k, tuple(v)) if isinstance(v, List) else (k, v) - for k, v in self.__dict__.items() - ] - return hash(tuple(sorted(items))) - - -class AccentPhrase(BaseModel): - """ - アクセント句ごとの情報 - """ - - moras: List[Mora] = Field(title="モーラのリスト") - accent: int = Field(title="アクセント箇所") - pause_mora: Optional[Mora] = Field(title="後ろに無音を付けるかどうか") - is_interrogative: bool = Field(default=False, title="疑問系かどうか") - - def __hash__(self): - items = [ - (k, tuple(v)) if isinstance(v, List) else (k, v) - for k, v in self.__dict__.items() - ] - return hash(tuple(sorted(items))) - - -class AudioQuery(BaseModel): - """ - 音声合成用のクエリ - """ - - accent_phrases: List[AccentPhrase] = Field(title="アクセント句のリスト") - speedScale: float = Field(title="全体の話速") - pitchScale: float = Field(title="全体の音高") - intonationScale: float = Field(title="全体の抑揚") - volumeScale: float = Field(title="全体の音量") - prePhonemeLength: float = Field(title="音声の前の無音時間") - postPhonemeLength: float = Field(title="音声の後の無音時間") - outputSamplingRate: int = Field(title="音声データの出力サンプリングレート") - outputStereo: bool = Field(title="音声データをステレオ出力するか否か") - kana: Optional[str] = Field(title="[読み取り専用]AquesTalkライクな読み仮名。音声合成クエリとしては無視される") - - def __hash__(self): - items = [ - (k, tuple(v)) if isinstance(v, List) else (k, v) - for k, v in self.__dict__.items() - ] - return hash(tuple(sorted(items))) - - -class ParseKanaErrorCode(Enum): - UNKNOWN_TEXT = "判別できない読み仮名があります: {text}" - ACCENT_TOP = "句頭にアクセントは置けません: {text}" - ACCENT_TWICE = "1つのアクセント句に二つ以上のアクセントは置けません: {text}" - ACCENT_NOTFOUND = "アクセントを指定していないアクセント句があります: {text}" - EMPTY_PHRASE = "{position}番目のアクセント句が空白です" - INTERROGATION_MARK_NOT_AT_END = "アクセント句末以外に「?」は置けません: {text}" - INFINITE_LOOP = "処理時に無限ループになってしまいました...バグ報告をお願いします。" - - -class ParseKanaError(Exception): - def __init__(self, errcode: ParseKanaErrorCode, **kwargs): - self.errcode = errcode - self.errname = errcode.name - self.kwargs: Dict[str, str] = kwargs - err_fmt: str = errcode.value - self.text = err_fmt.format(**kwargs) - - -class ParseKanaBadRequest(BaseModel): - text: str = Field(title="エラーメッセージ") - error_name: str = Field( - title="エラー名", - description="|name|description|\n|---|---|\n" - + "\n".join( - [ - "| {} | {} |".format(err.name, err.value) - for err in list(ParseKanaErrorCode) - ] - ), - ) - error_args: Dict[str, str] = Field(title="エラーを起こした箇所") - - def __init__(self, err: ParseKanaError): - super().__init__(text=err.text, error_name=err.errname, error_args=err.kwargs) - - -class MorphableTargetInfo(BaseModel): - - is_morphable: bool = Field(title="指定した話者に対してモーフィングの可否") - # FIXME: add reason 
property - # reason: Optional[str] = Field(title="is_morphableがfalseである場合、その理由") - - -class SpeakerNotFoundError(LookupError): - def __init__(self, speaker: int, *args: object, **kywrds: object) -> None: - self.speaker = speaker - super().__init__(f"speaker {speaker} is not found.", *args, **kywrds) - - -class LibrarySpeaker(BaseModel): - """ - 音声ライブラリに含まれる話者の情報 - """ - - speaker: Speaker = Field(title="話者情報") - speaker_info: SpeakerInfo = Field(title="話者の追加情報") - - -class DownloadableLibrary(BaseModel): - """ - ダウンロード可能な音声ライブラリの情報 - """ - - name: str = Field(title="音声ライブラリの名前") - uuid: str = Field(title="音声ライブラリのUUID") - version: str = Field(title="音声ライブラリのバージョン") - download_url: str = Field(title="音声ライブラリのダウンロードURL") - bytes: int = Field(title="音声ライブラリのバイト数") - speakers: List[LibrarySpeaker] = Field(title="音声ライブラリに含まれる話者のリスト") - - -USER_DICT_MIN_PRIORITY = 0 -USER_DICT_MAX_PRIORITY = 10 - - -class UserDictWord(BaseModel): - """ - 辞書のコンパイルに使われる情報 - """ - - surface: str = Field(title="表層形") - priority: conint(ge=USER_DICT_MIN_PRIORITY, le=USER_DICT_MAX_PRIORITY) = Field( - title="優先度" - ) - context_id: int = Field(title="文脈ID", default=1348) - part_of_speech: str = Field(title="品詞") - part_of_speech_detail_1: str = Field(title="品詞細分類1") - part_of_speech_detail_2: str = Field(title="品詞細分類2") - part_of_speech_detail_3: str = Field(title="品詞細分類3") - inflectional_type: str = Field(title="活用型") - inflectional_form: str = Field(title="活用形") - stem: str = Field(title="原形") - yomi: str = Field(title="読み") - pronunciation: str = Field(title="発音") - accent_type: int = Field(title="アクセント型") - mora_count: Optional[int] = Field(title="モーラ数") - accent_associative_rule: str = Field(title="アクセント結合規則") - - class Config: - validate_assignment = True - - @validator("surface") - def convert_to_zenkaku(cls, surface): - return surface.translate( - str.maketrans( - "".join(chr(0x21 + i) for i in range(94)), - "".join(chr(0xFF01 + i) for i in range(94)), - ) - ) - - @validator("pronunciation", pre=True) - def check_is_katakana(cls, pronunciation): - if not fullmatch(r"[ァ-ヴー]+", pronunciation): - raise ValueError("発音は有効なカタカナでなくてはいけません。") - sutegana = ["ァ", "ィ", "ゥ", "ェ", "ォ", "ャ", "ュ", "ョ", "ヮ", "ッ"] - for i in range(len(pronunciation)): - if pronunciation[i] in sutegana: - # 「キャット」のように、捨て仮名が連続する可能性が考えられるので、 - # 「ッ」に関しては「ッ」そのものが連続している場合と、「ッ」の後にほかの捨て仮名が連続する場合のみ無効とする - if i < len(pronunciation) - 1 and ( - pronunciation[i + 1] in sutegana[:-1] - or ( - pronunciation[i] == sutegana[-1] - and pronunciation[i + 1] == sutegana[-1] - ) - ): - raise ValueError("無効な発音です。(捨て仮名の連続)") - if pronunciation[i] == "ヮ": - if i != 0 and pronunciation[i - 1] not in ["ク", "グ"]: - raise ValueError("無効な発音です。(「くゎ」「ぐゎ」以外の「ゎ」の使用)") - return pronunciation - - @validator("mora_count", pre=True, always=True) - def check_mora_count_and_accent_type(cls, mora_count, values): - if "pronunciation" not in values or "accent_type" not in values: - # 適切な場所でエラーを出すようにする - return mora_count - - if mora_count is None: - rule_others = "[イ][ェ]|[ヴ][ャュョ]|[トド][ゥ]|[テデ][ィャュョ]|[デ][ェ]|[クグ][ヮ]" - rule_line_i = "[キシチニヒミリギジビピ][ェャュョ]" - rule_line_u = "[ツフヴ][ァ]|[ウスツフヴズ][ィ]|[ウツフヴ][ェォ]" - rule_one_mora = "[ァ-ヴー]" - mora_count = len( - findall( - f"(?:{rule_others}|{rule_line_i}|{rule_line_u}|{rule_one_mora})", - values["pronunciation"], - ) - ) - - if not 0 <= values["accent_type"] <= mora_count: - raise ValueError( - "誤ったアクセント型です({})。 expect: 0 <= accent_type <= {}".format( - values["accent_type"], mora_count - ) - ) - return mora_count - - -class 
PartOfSpeechDetail(BaseModel): - """ - 品詞ごとの情報 - """ - - part_of_speech: str = Field(title="品詞") - part_of_speech_detail_1: str = Field(title="品詞細分類1") - part_of_speech_detail_2: str = Field(title="品詞細分類2") - part_of_speech_detail_3: str = Field(title="品詞細分類3") - # context_idは辞書の左・右文脈IDのこと - # https://github.com/VOICEVOX/open_jtalk/blob/427cfd761b78efb6094bea3c5bb8c968f0d711ab/src/mecab-naist-jdic/_left-id.def # noqa - context_id: int = Field(title="文脈ID") - cost_candidates: List[int] = Field(title="コストのパーセンタイル") - accent_associative_rules: List[str] = Field(title="アクセント結合規則の一覧") - - -class WordTypes(str, Enum): - """ - fastapiでword_type引数を検証する時に使用するクラス - """ - - PROPER_NOUN = "PROPER_NOUN" - COMMON_NOUN = "COMMON_NOUN" - VERB = "VERB" - ADJECTIVE = "ADJECTIVE" - SUFFIX = "SUFFIX" - - -class SupportedDevicesInfo(BaseModel): - """ - 対応しているデバイスの情報 - """ - - cpu: bool = Field(title="CPUに対応しているか") - cuda: bool = Field(title="CUDA(Nvidia GPU)に対応しているか") - dml: bool = Field(title="DirectML(Nvidia GPU/Radeon GPU等)に対応しているか") - - -class SupportedFeaturesInfo(BaseModel): - """ - エンジンの機能の情報 - """ - - support_adjusting_mora: bool = Field(title="モーラが調整可能かどうか") - support_adjusting_speed_scale: bool = Field(title="話速が調整可能かどうか") - support_adjusting_pitch_scale: bool = Field(title="音高が調整可能かどうか") - support_adjusting_intonation_scale: bool = Field(title="抑揚が調整可能かどうか") - support_adjusting_volume_scale: bool = Field(title="音量が調整可能かどうか") - support_adjusting_silence_scale: bool = Field(title="前後の無音時間が調節可能かどうか") - support_interrogative_upspeak: bool = Field(title="疑似疑問文に対応しているかどうか") - support_switching_device: bool = Field(title="CPU/GPUの切り替えが可能かどうか") diff --git a/spaces/dmeck/RVC-Speakers/bark/__init__.py b/spaces/dmeck/RVC-Speakers/bark/__init__.py deleted file mode 100644 index cddfec04f37d1fd58bbff6cbed6b8b8e5dc50eee..0000000000000000000000000000000000000000 --- a/spaces/dmeck/RVC-Speakers/bark/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ - -from speakers.common.registry import registry -import os - -root_dir = os.path.dirname(os.path.abspath(__file__)) -registry.register_path("bark_library_root", root_dir) - diff --git a/spaces/donimes977/roblox/Dockerfile b/spaces/donimes977/roblox/Dockerfile deleted file mode 100644 index 056fc3a0e57db1052da06b74e0a61bfc831fe82a..0000000000000000000000000000000000000000 --- a/spaces/donimes977/roblox/Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,并且清除缓存🧹 -RUN apk --no-cache add git && \ - git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app && \ - apk del git - -# 设置工作目录 -WORKDIR /workspace/app - -# 编译 go 项目 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像🪞 -FROM alpine - -# 设置工作目录💼 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件👔 -COPY --from=builder /workspace/app/go-proxy-bingai . 
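# Illustrative local usage (assumed, not taken from this repository):
#   docker build -t go-proxy-bingai .
#   docker run -p 8080:8080 -e Go_Proxy_BingAI_USER_TOKEN_1=<your_token> go-proxy-bingai
# The -e flag overrides the sample token set by the ENV line below.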
- -# (可选)设置环境变量✍️ -ENV Go_Proxy_BingAI_USER_TOKEN_1="asfasfwfwfafasff11111111111111111wrwrwr" - -# 端口 -EXPOSE 8080 - -# 容器运行✅ -CMD ["/workspace/app/go-proxy-bingai"] diff --git a/spaces/dorkai/singpt-2.0/extensions/gallery/script.py b/spaces/dorkai/singpt-2.0/extensions/gallery/script.py deleted file mode 100644 index 8a2d7cf988734a7ab0966d047ff3d31ba58324b7..0000000000000000000000000000000000000000 --- a/spaces/dorkai/singpt-2.0/extensions/gallery/script.py +++ /dev/null @@ -1,82 +0,0 @@ -from pathlib import Path - -import gradio as gr - -from modules.html_generator import get_image_cache - - -def generate_html(): - css = """ - .character-gallery { - margin: 1rem 0; - display: grid; - grid-template-columns: repeat(auto-fit, minmax(150px, 1fr)); - grid-column-gap: 0.4rem; - grid-row-gap: 1.2rem; - } - - .character-container { - cursor: pointer; - text-align: center; - position: relative; - opacity: 0.85; - } - - .character-container:hover { - opacity: 1; - } - - .character-container .placeholder, .character-container img { - width: 150px; - height: 200px; - background-color: gray; - object-fit: cover; - margin: 0 auto; - border-radius: 1rem; - border: 3px solid white; - box-shadow: 3px 3px 6px 0px rgb(0 0 0 / 50%); - } - - .character-name { - margin-top: 0.3rem; - display: block; - font-size: 1.2rem; - font-weight: 600; - overflow-wrap: anywhere; - } - """ - - container_html = f'" - return container_html - -def ui(): - with gr.Accordion("Character gallery"): - update = gr.Button("Refresh") - gallery = gr.HTML(value=generate_html()) - update.click(generate_html, [], gallery) diff --git a/spaces/duycse1603/math2tex/HybridViT/module/component/common/postional_encoding.py b/spaces/duycse1603/math2tex/HybridViT/module/component/common/postional_encoding.py deleted file mode 100644 index 3ced84f7bf616a8b3deae2650fba02b9adfff2d3..0000000000000000000000000000000000000000 --- a/spaces/duycse1603/math2tex/HybridViT/module/component/common/postional_encoding.py +++ /dev/null @@ -1,226 +0,0 @@ -import math - -import numpy as np -import torch -import torch.nn as nn -from typing import Tuple -from torch import Tensor -from torch.nn import functional as F - - -class Adaptive2DPositionalEncoding(nn.Module): - """Implement Adaptive 2D positional encoder for SATRN, see - `SATRN `_ - Modified from https://github.com/Media-Smart/vedastr - Licensed under the Apache License, Version 2.0 (the "License"); - Args: - d_hid (int): Dimensions of hidden layer. - n_height (int): Max height of the 2D feature output. - n_width (int): Max width of the 2D feature output. - dropout (int): Size of hidden layers of the model. 
- """ - - def __init__(self, - d_hid=512, - n_height=100, - n_width=100, - dropout=0.1, - ): - super().__init__() - - h_position_encoder = self._get_sinusoid_encoding_table(n_height, d_hid) - h_position_encoder = h_position_encoder.transpose(0, 1) - h_position_encoder = h_position_encoder.view(1, d_hid, n_height, 1) - - w_position_encoder = self._get_sinusoid_encoding_table(n_width, d_hid) - w_position_encoder = w_position_encoder.transpose(0, 1) - w_position_encoder = w_position_encoder.view(1, d_hid, 1, n_width) - - self.register_buffer('h_position_encoder', h_position_encoder) - self.register_buffer('w_position_encoder', w_position_encoder) - - self.h_scale = self.scale_factor_generate(d_hid) - self.w_scale = self.scale_factor_generate(d_hid) - self.pool = nn.AdaptiveAvgPool2d(1) - self.dropout = nn.Dropout(p=dropout) - - def _get_sinusoid_encoding_table(self, n_position, d_hid): - """Sinusoid position encoding table.""" - denominator = torch.Tensor([ - 1.0 / np.power(10000, 2 * (hid_j // 2) / d_hid) - for hid_j in range(d_hid) - ]) - denominator = denominator.view(1, -1) - pos_tensor = torch.arange(n_position).unsqueeze(-1).float() - sinusoid_table = pos_tensor * denominator - sinusoid_table[:, 0::2] = torch.sin(sinusoid_table[:, 0::2]) - sinusoid_table[:, 1::2] = torch.cos(sinusoid_table[:, 1::2]) - - return sinusoid_table - - def scale_factor_generate(self, d_hid): - scale_factor = nn.Sequential( - nn.Conv2d(d_hid, d_hid, kernel_size=1), nn.ReLU(inplace=True), - nn.Conv2d(d_hid, d_hid, kernel_size=1), nn.Sigmoid()) - - return scale_factor - - def init_weight(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('ReLU')) - - def forward(self, x): - b, c, h, w = x.size() - - avg_pool = self.pool(x) - - h_pos_encoding = \ - self.h_scale(avg_pool) * self.h_position_encoder[:, :, :h, :] - w_pos_encoding = \ - self.w_scale(avg_pool) * self.w_position_encoder[:, :, :, :w] - - out = x + h_pos_encoding + w_pos_encoding - - out = self.dropout(out) - - return out - -class PositionalEncoding2D(nn.Module): - """2-D positional encodings for the feature maps produced by the encoder. - Following https://arxiv.org/abs/2103.06450 by Sumeet Singh. - Reference: - https://github.com/full-stack-deep-learning/fsdl-text-recognizer-2021-labs/blob/main/lab9/text_recognizer/models/transformer_util.py - """ - - def __init__(self, d_model: int, max_h: int = 2000, max_w: int = 2000) -> None: - super().__init__() - self.d_model = d_model - assert d_model % 2 == 0, f"Embedding depth {d_model} is not even" - pe = self.make_pe(d_model, max_h, max_w) # (d_model, max_h, max_w) - self.register_buffer("pe", pe) - - @staticmethod - def make_pe(d_model: int, max_h: int, max_w: int) -> Tensor: - """Compute positional encoding.""" - pe_h = PositionalEncoding1D.make_pe(d_model=d_model // 2, max_len=max_h) # (max_h, 1 d_model // 2) - pe_h = pe_h.permute(2, 0, 1).expand(-1, -1, max_w) # (d_model // 2, max_h, max_w) - - pe_w = PositionalEncoding1D.make_pe(d_model=d_model // 2, max_len=max_w) # (max_w, 1, d_model // 2) - pe_w = pe_w.permute(2, 1, 0).expand(-1, max_h, -1) # (d_model // 2, max_h, max_w) - - pe = torch.cat([pe_h, pe_w], dim=0) # (d_model, max_h, max_w) - return pe - - def forward(self, x: Tensor) -> Tensor: - """Forward pass. 
- Args: - x: (B, d_model, H, W) - Returns: - (B, d_model, H, W) - """ - assert x.shape[1] == self.pe.shape[0] # type: ignore - x = x + self.pe[:, : x.size(2), : x.size(3)] # type: ignore - return x - - -class PositionalEncoding1D(nn.Module): - """Classic Attention-is-all-you-need positional encoding.""" - - def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 1000) -> None: - super().__init__() - self.dropout = nn.Dropout(p=dropout) - pe = self.make_pe(d_model, max_len) # (max_len, 1, d_model) - self.register_buffer("pe", pe) - - @staticmethod - def make_pe(d_model: int, max_len: int) -> Tensor: - """Compute positional encoding.""" - pe = torch.zeros(max_len, d_model) - position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) - div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - pe = pe.unsqueeze(1) - return pe - - def forward(self, x: Tensor) -> Tensor: - """Forward pass. - Args: - x: (S, B, d_model) - Returns: - (S, B, d_model) - """ - assert x.shape[2] == self.pe.shape[2] # type: ignore - x = x + self.pe[: x.size(0)] # type: ignore - return self.dropout(x) - -Size_ = Tuple[int, int] - -class PosConv(nn.Module): - # PEG from https://arxiv.org/abs/2102.10882 - def __init__(self, in_chans, embed_dim=768, stride=1): - super(PosConv, self).__init__() - self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), ) - self.stride = stride - - def forward(self, x, size: Size_): - B, N, C = x.shape - cls_token, feat_token = x[:, 0], x[:, 1:] - cnn_feat_token = feat_token.transpose(1, 2).view(B, C, *size) - x = self.proj(cnn_feat_token) - if self.stride == 1: - x += cnn_feat_token - x = x.flatten(2).transpose(1, 2) - x = torch.cat((cls_token.unsqueeze(1), x), dim=1) - return x - - def no_weight_decay(self): - return ['proj.%d.weight' % i for i in range(4)] - -class PosConv1D(nn.Module): - # PEG from https://arxiv.org/abs/2102.10882 - def __init__(self, in_chans, embed_dim=768, stride=1): - super(PosConv1D, self).__init__() - self.proj = nn.Sequential(nn.Conv1d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), ) - self.stride = stride - - def forward(self, x, size: int): - B, N, C = x.shape - cls_token, feat_token = x[:, 0], x[:, 1:] - cnn_feat_token = feat_token.transpose(1, 2).view(B, C, size) - x = self.proj(cnn_feat_token) - if self.stride == 1: - x += cnn_feat_token - x = x.transpose(1, 2) - x = torch.cat((cls_token.unsqueeze(1), x), dim=1) - return x - - def no_weight_decay(self): - return ['proj.%d.weight' % i for i in range(4)] - -def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=(), old_grid_shape=()): - # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from - # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 - - print('Resized position embedding: %s to %s'%(posemb.shape, posemb_new.shape)) - ntok_new = posemb_new.shape[1] - - if num_tokens: - posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] - ntok_new -= num_tokens - else: - posemb_tok, posemb_grid = posemb[:, :0], posemb[0] - - if not len(gs_new): # backwards compatibility - gs_new = [int(math.sqrt(ntok_new))] * 2 - - assert len(gs_new) >= 2 - - print('Position embedding grid-size from %s to %s'%(old_grid_shape, gs_new)) - posemb_grid = posemb_grid.reshape(1, old_grid_shape[0], old_grid_shape[1], -1).permute(0, 3, 1, 2) - posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False) - posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) - posemb = torch.cat([posemb_tok, posemb_grid], dim=1) - - return posemb diff --git a/spaces/eunjae/LoRA-DreamBooth-Training-UI/constants.py b/spaces/eunjae/LoRA-DreamBooth-Training-UI/constants.py deleted file mode 100644 index baaebbae71058fbb4faed35fd00e7559305dc409..0000000000000000000000000000000000000000 --- a/spaces/eunjae/LoRA-DreamBooth-Training-UI/constants.py +++ /dev/null @@ -1,6 +0,0 @@ -import enum - - -class UploadTarget(enum.Enum): - PERSONAL_PROFILE = 'Personal Profile' - LORA_LIBRARY = 'LoRA Library' diff --git a/spaces/evaluate-metric/squad/compute_score.py b/spaces/evaluate-metric/squad/compute_score.py deleted file mode 100644 index e60acfd1044319e43a59ffe8db75e63b68785ba7..0000000000000000000000000000000000000000 --- a/spaces/evaluate-metric/squad/compute_score.py +++ /dev/null @@ -1,92 +0,0 @@ -""" Official evaluation script for v1.1 of the SQuAD dataset. 
""" - -import argparse -import json -import re -import string -import sys -from collections import Counter - - -def normalize_answer(s): - """Lower text and remove punctuation, articles and extra whitespace.""" - - def remove_articles(text): - return re.sub(r"\b(a|an|the)\b", " ", text) - - def white_space_fix(text): - return " ".join(text.split()) - - def remove_punc(text): - exclude = set(string.punctuation) - return "".join(ch for ch in text if ch not in exclude) - - def lower(text): - return text.lower() - - return white_space_fix(remove_articles(remove_punc(lower(s)))) - - -def f1_score(prediction, ground_truth): - prediction_tokens = normalize_answer(prediction).split() - ground_truth_tokens = normalize_answer(ground_truth).split() - common = Counter(prediction_tokens) & Counter(ground_truth_tokens) - num_same = sum(common.values()) - if num_same == 0: - return 0 - precision = 1.0 * num_same / len(prediction_tokens) - recall = 1.0 * num_same / len(ground_truth_tokens) - f1 = (2 * precision * recall) / (precision + recall) - return f1 - - -def exact_match_score(prediction, ground_truth): - return normalize_answer(prediction) == normalize_answer(ground_truth) - - -def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): - scores_for_ground_truths = [] - for ground_truth in ground_truths: - score = metric_fn(prediction, ground_truth) - scores_for_ground_truths.append(score) - return max(scores_for_ground_truths) - - -def compute_score(dataset, predictions): - f1 = exact_match = total = 0 - for article in dataset: - for paragraph in article["paragraphs"]: - for qa in paragraph["qas"]: - total += 1 - if qa["id"] not in predictions: - message = "Unanswered question " + qa["id"] + " will receive score 0." - print(message, file=sys.stderr) - continue - ground_truths = list(map(lambda x: x["text"], qa["answers"])) - prediction = predictions[qa["id"]] - exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) - f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) - - exact_match = 100.0 * exact_match / total - f1 = 100.0 * f1 / total - - return {"exact_match": exact_match, "f1": f1} - - -if __name__ == "__main__": - expected_version = "1.1" - parser = argparse.ArgumentParser(description="Evaluation for SQuAD " + expected_version) - parser.add_argument("dataset_file", help="Dataset file") - parser.add_argument("prediction_file", help="Prediction File") - args = parser.parse_args() - with open(args.dataset_file) as dataset_file: - dataset_json = json.load(dataset_file) - if dataset_json["version"] != expected_version: - print( - "Evaluation expects v-" + expected_version + ", but got dataset with v-" + dataset_json["version"], - file=sys.stderr, - ) - dataset = dataset_json["data"] - with open(args.prediction_file) as prediction_file: - predictions = json.load(prediction_file) - print(json.dumps(compute_score(dataset, predictions))) diff --git a/spaces/facebook/MusicGen/docs/DATASETS.md b/spaces/facebook/MusicGen/docs/DATASETS.md deleted file mode 100644 index b0890c03cf732450eb498559638c6b45d50e40c3..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/docs/DATASETS.md +++ /dev/null @@ -1,82 +0,0 @@ -# AudioCraft datasets - -Our dataset manifest files consist in 1-json-per-line files, potentially gzipped, -as `data.jsons` or `data.jsons.gz` files. This JSON contains the path to the audio -file and associated metadata. 
The manifest files are then provided in the configuration, -as `datasource` sub-configuration. A datasource contains the pointers to the paths of -the manifest files for each AudioCraft stage (or split) along with additional information -(eg. maximum sample rate to use against this dataset). All the datasources are under the -`dset` group config, with a dedicated configuration file for each dataset. - -## Getting started - -### Example - -See the provided example in the directory that provides a manifest to use the example dataset -provided under the [dataset folder](../dataset/example). - -The manifest files are stored in the [egs folder](../egs/example). - -```shell -egs/ - example/data.json.gz -``` - -A datasource is defined in the configuration folder, in the dset group config for this dataset -at [config/dset/audio/example](../config/dset/audio/example.yaml): - -```shell -# @package __global__ - -datasource: - max_sample_rate: 44100 - max_channels: 2 - - train: egs/example - valid: egs/example - evaluate: egs/example - generate: egs/example -``` - -For proper dataset, one should create manifest for each of the splits and specify the correct path -to the given manifest in the datasource for each split. - -Then, using a dataset through the configuration can be done pointing to the -corresponding dataset configuration: -```shell -dset= # should match the yaml file name - -# for example -dset=audio/example -``` - -### Creating manifest files - -Assuming you want to create manifest files to load with AudioCraft's AudioDataset, you can use -the following command to create new manifest files from a given folder containing audio files: - -```shell -python -m audiocraft.data.audio_dataset egs/my_dataset/my_dataset_split/data.jsonl.gz - -# For example to generate the manifest for dset=audio/example -# note: we don't use any split and we don't compress the jsonl file for this dummy example -python -m audiocraft.data.audio_dataset dataset/example egs/example/data.jsonl - -# More info with: python -m audiocraft.data.audio_dataset --help -``` - -## Additional information - -### MusicDataset and metadata - -The MusicDataset is an AudioDataset with additional metadata. The MusicDataset expects -the additional metadata to be stored in a JSON file that has the same path as the corresponding -audio file, but with a `.json` extension. - -### SoundDataset and metadata - -The SoundDataset is an AudioDataset with descriptions metadata. Similarly to the MusicDataset, -the SoundDataset expects the additional metadata to be stored in a JSON file that has the same -path as the corresponding audio file, but with a `.json` extension. Additionally, the SoundDataset -supports an additional parameter pointing to an extra folder `external_metadata_source` containing -all the JSON metadata files given they have the same filename as the audio file. 
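To make the manifest and side-car metadata layout described above concrete, here is a minimal sketch that writes a one-entry gzipped manifest plus a matching description file. The field names (`path`, `duration`, `sample_rate`, `description`) and the example paths are assumptions for illustration only; in practice you would let `python -m audiocraft.data.audio_dataset` generate the manifest as shown earlier and check its output against the schema of your installed AudioCraft version.

```python
import gzip
import json
from pathlib import Path

# Hypothetical manifest entry for one audio file: one JSON object per line,
# holding the audio path and basic audio metadata (field names assumed).
manifest_entry = {
    "path": "dataset/example/track_01.mp3",
    "duration": 15.0,
    "sample_rate": 44100,
}

# Side-car description used by MusicDataset/SoundDataset: same path as the
# audio file, but with a .json extension (key name assumed).
description = {"description": "an energetic electro track with a driving beat"}


def write_example(egs_dir: str = "egs/example") -> None:
    """Write a one-line gzipped manifest and the matching metadata file."""
    Path(egs_dir).mkdir(parents=True, exist_ok=True)
    with gzip.open(Path(egs_dir) / "data.jsonl.gz", "wt") as fh:
        fh.write(json.dumps(manifest_entry) + "\n")

    meta_path = Path(manifest_entry["path"]).with_suffix(".json")
    meta_path.parent.mkdir(parents=True, exist_ok=True)
    meta_path.write_text(json.dumps(description))


if __name__ == "__main__":
    write_example()
```

Loading then goes through the datasource configuration shown above, with `train`, `valid`, `evaluate`, and `generate` pointing at the folder that holds the manifest.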
diff --git a/spaces/failfast/2D-GameCreator/src/components/CodepenIcon.tsx b/spaces/failfast/2D-GameCreator/src/components/CodepenIcon.tsx deleted file mode 100644 index 05d90b529b86cda98ed295365cd1f74d14f1e43c..0000000000000000000000000000000000000000 --- a/spaces/failfast/2D-GameCreator/src/components/CodepenIcon.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import SvgIcon, { SvgIconProps } from "@mui/material/SvgIcon"; - -export function CodepenIcon(props: SvgIconProps) { - return ( - - - - ); -} diff --git a/spaces/falterWliame/Face_Mask_Detection/OS Plus 40 Cubo Sistemas Serial ((FREE)).md b/spaces/falterWliame/Face_Mask_Detection/OS Plus 40 Cubo Sistemas Serial ((FREE)).md deleted file mode 100644 index 6681bff5e9b56d29fdfdb87a5b1621781e09e28e..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/OS Plus 40 Cubo Sistemas Serial ((FREE)).md +++ /dev/null @@ -1,110 +0,0 @@ -## OS Plus 40 Cubo Sistemas Serial - - - - - - ![OS Plus 40 Cubo Sistemas Serial ((FREE))](https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcT3mMJCDKTKdxYMvxokRhoxSdlPt8opfs9i5m8sQOusetBjnD31MWlTLkaL) - - - - - -**Download File ===== [https://miimms.com/2tyiXk](https://miimms.com/2tyiXk)** - - - - - - - - - - - - Here is a possible title and article with html formatting for the keyword "OS Plus 40 Cubo Sistemas Serial": - -# OS Plus 40 Cubo Sistemas: A Powerful Software for Business Management - - - -OS Plus 40 Cubo Sistemas is a software developed by Cubo Sistemas, a Brazilian company specialized in creating solutions for small and medium enterprises. OS Plus 40 Cubo Sistemas is designed to help business owners manage their finances, sales, purchases, inventory, orders, and more. OS Plus 40 Cubo Sistemas is easy to use, intuitive, and compatible with Windows operating systems. - - - -Some of the features of OS Plus 40 Cubo Sistemas are: - - - -- Integration with electronic invoices and tax documents - -- Automatic backup and data security - -- Customizable reports and dashboards - -- Multi-user and multi-company support - -- Online and offline access - -- Customer service and technical support - - - -To use OS Plus 40 Cubo Sistemas, you need to purchase a license and activate it with a serial number. The serial number is a unique code that identifies your software and allows you to access all its functions. You can buy OS Plus 40 Cubo Sistemas online or through authorized resellers. Once you have your serial number, you can enter it in the software activation screen and start using OS Plus 40 Cubo Sistemas. - - - -If you are looking for a reliable and efficient software for your business management, OS Plus 40 Cubo Sistemas is a great option. It will help you optimize your processes, save time and money, and improve your performance. OS Plus 40 Cubo Sistemas is a software that adapts to your needs and grows with your business. - -Here is a possible continuation of the article with html formatting for the keyword "OS Plus 40 Cubo Sistemas Serial": - -OS Plus 40 Cubo Sistemas has been used by many businesses in different sectors, such as technical assistance, repair shops, electronics, cell phones, and more. OS Plus 40 Cubo Sistemas has received positive reviews from its users, who praise its functionality, simplicity, and affordability. You can watch some of these reviews on YouTube[^1^] [^2^]. - - - -If you want to try OS Plus 40 Cubo Sistemas for yourself, you can download a free trial version from the Cubo Sistemas website. 
The trial version allows you to use the software for 30 days, with no limitations or restrictions. You can also contact Cubo Sistemas for a demonstration or a consultation. Cubo Sistemas has a team of qualified professionals who can assist you with any questions or issues you may have. - - - -OS Plus 40 Cubo Sistemas is a software that will make your business management easier and more efficient. With OS Plus 40 Cubo Sistemas, you can have more control over your operations, improve your customer satisfaction, and increase your profitability. OS Plus 40 Cubo Sistemas is a software that will help you achieve your business goals. - -Here are a few more paragraphs with html formatting for the keyword "OS Plus 40 Cubo Sistemas Serial": - -OS Plus 40 Cubo Sistemas has a variety of modules that can be customized according to your business needs. You can choose the modules that best suit your activities, such as: - - - -- Service orders: manage your technical assistance and repair services, from the opening to the closing of the order, with status updates, service history, warranty control, and more. - -- Stock control: manage your inventory of products and parts, with registration of suppliers, purchases, sales, transfers, adjustments, and more. - -- Billing: manage your invoices and receipts, with integration with electronic invoices and tax documents, payment methods, installments, discounts, and more. - -- Financial control: manage your cash flow and bank accounts, with registration of revenues and expenses, cash register, bank reconciliation, financial reports, and more. - -- Customer relationship management: manage your contacts and leads, with registration of customers and prospects, follow-up activities, sales funnel, marketing campaigns, and more. - - - -OS Plus 40 Cubo Sistemas also has other features that enhance its functionality and usability, such as: - - - -- Data synchronization: synchronize your data between different devices and locations, with online and offline access. - -- Data backup: backup your data automatically and securely, with encryption and cloud storage. - -- Data import and export: import and export your data from different sources and formats, such as Excel, XML, TXT, CSV, and more. - -- Data analysis: analyze your data with graphical and numerical indicators, charts, dashboards, filters, and more. - -- Data customization: customize your data with user-defined fields, labels, colors, logos, templates, and more. 
- - - - dfd1c89656 - - - - - diff --git a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/test_project/python/dqn/dqn.py b/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/test_project/python/dqn/dqn.py deleted file mode 100644 index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/test_project/python/dqn/dqn.py +++ /dev/null @@ -1,245 +0,0 @@ -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -import gym -import numpy as np -import torch as th -from torch.nn import functional as F - -from stable_baselines3.common import logger -from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm -from stable_baselines3.common.preprocessing import maybe_transpose -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update -from stable_baselines3.dqn.policies import DQNPolicy - - -class DQN(OffPolicyAlgorithm): - """ - Deep Q-Network (DQN) - - Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236 - Default hyperparameters are taken from the nature paper, - except for the optimizer and learning rate that were taken from Stable Baselines defaults. - - :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) - :param env: The environment to learn from (if registered in Gym, can be str) - :param learning_rate: The learning rate, it can be a function - of the current progress remaining (from 1 to 0) - :param buffer_size: size of the replay buffer - :param learning_starts: how many steps of the model to collect transitions for before learning starts - :param batch_size: Minibatch size for each gradient update - :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update - :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit - like ``(5, "step")`` or ``(2, "episode")``. - :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) - Set to ``-1`` means to do as many gradient steps as steps done in the environment - during the rollout. - :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer - at a cost of more complexity. - See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 - :param target_update_interval: update the target network every ``target_update_interval`` - environment steps. - :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced - :param exploration_initial_eps: initial value of random action probability - :param exploration_final_eps: final value of random action probability - :param max_grad_norm: The maximum value for the gradient clipping - :param tensorboard_log: the log location for tensorboard (if None, no logging) - :param create_eval_env: Whether to create a second environment that will be - used for evaluating the agent periodically. (Only available when passing string for the environment) - :param policy_kwargs: additional arguments to be passed to the policy on creation - :param verbose: the verbosity level: 0 no output, 1 info, 2 debug - :param seed: Seed for the pseudo random generators - :param device: Device (cpu, cuda, ...) on which the code should be run. 
- Setting it to auto, the code will be run on the GPU if possible. - :param _init_setup_model: Whether or not to build the network at the creation of the instance - """ - - def __init__( - self, - policy: Union[str, Type[DQNPolicy]], - env: Union[GymEnv, str], - learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, - learning_starts: int = 50000, - batch_size: Optional[int] = 32, - tau: float = 1.0, - gamma: float = 0.99, - train_freq: Union[int, Tuple[int, str]] = 4, - gradient_steps: int = 1, - optimize_memory_usage: bool = False, - target_update_interval: int = 10000, - exploration_fraction: float = 0.1, - exploration_initial_eps: float = 1.0, - exploration_final_eps: float = 0.05, - max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, - create_eval_env: bool = False, - policy_kwargs: Optional[Dict[str, Any]] = None, - verbose: int = 0, - seed: Optional[int] = None, - device: Union[th.device, str] = "auto", - _init_setup_model: bool = True, - ): - - super(DQN, self).__init__( - policy, - env, - DQNPolicy, - learning_rate, - buffer_size, - learning_starts, - batch_size, - tau, - gamma, - train_freq, - gradient_steps, - action_noise=None, # No action noise - policy_kwargs=policy_kwargs, - tensorboard_log=tensorboard_log, - verbose=verbose, - device=device, - create_eval_env=create_eval_env, - seed=seed, - sde_support=False, - optimize_memory_usage=optimize_memory_usage, - supported_action_spaces=(gym.spaces.Discrete,), - ) - - self.exploration_initial_eps = exploration_initial_eps - self.exploration_final_eps = exploration_final_eps - self.exploration_fraction = exploration_fraction - self.target_update_interval = target_update_interval - self.max_grad_norm = max_grad_norm - # "epsilon" for the epsilon-greedy exploration - self.exploration_rate = 0.0 - # Linear schedule will be defined in `_setup_model()` - self.exploration_schedule = None - self.q_net, self.q_net_target = None, None - - if _init_setup_model: - self._setup_model() - - def _setup_model(self) -> None: - super(DQN, self)._setup_model() - self._create_aliases() - self.exploration_schedule = get_linear_fn( - self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction - ) - - def _create_aliases(self) -> None: - self.q_net = self.policy.q_net - self.q_net_target = self.policy.q_net_target - - def _on_step(self) -> None: - """ - Update the exploration rate and target network if needed. - This method is called in ``collect_rollouts()`` after each step in the environment. 
- """ - if self.num_timesteps % self.target_update_interval == 0: - polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau) - - self.exploration_rate = self.exploration_schedule(self._current_progress_remaining) - logger.record("rollout/exploration rate", self.exploration_rate) - - def train(self, gradient_steps: int, batch_size: int = 100) -> None: - # Update learning rate according to schedule - self._update_learning_rate(self.policy.optimizer) - - losses = [] - for _ in range(gradient_steps): - # Sample replay buffer - replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) - - with th.no_grad(): - # Compute the next Q-values using the target network - next_q_values = self.q_net_target(replay_data.next_observations) - # Follow greedy policy: use the one with the highest value - next_q_values, _ = next_q_values.max(dim=1) - # Avoid potential broadcast issue - next_q_values = next_q_values.reshape(-1, 1) - # 1-step TD target - target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values - - # Get current Q-values estimates - current_q_values = self.q_net(replay_data.observations) - - # Retrieve the q-values for the actions from the replay buffer - current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long()) - - # Compute Huber loss (less sensitive to outliers) - loss = F.smooth_l1_loss(current_q_values, target_q_values) - losses.append(loss.item()) - - # Optimize the policy - self.policy.optimizer.zero_grad() - loss.backward() - # Clip gradient norm - th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) - self.policy.optimizer.step() - - # Increase update counter - self._n_updates += gradient_steps - - logger.record("train/n_updates", self._n_updates, exclude="tensorboard") - logger.record("train/loss", np.mean(losses)) - - def predict( - self, - observation: np.ndarray, - state: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, - deterministic: bool = False, - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: - """ - Overrides the base_class predict function to include epsilon-greedy exploration. - - :param observation: the input observation - :param state: The last states (can be None, used in recurrent policies) - :param mask: The last masks (can be None, used in recurrent policies) - :param deterministic: Whether or not to return deterministic actions. 
- :return: the model's action and the next state - (used in recurrent policies) - """ - if not deterministic and np.random.rand() < self.exploration_rate: - if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space): - n_batch = observation.shape[0] - action = np.array([self.action_space.sample() for _ in range(n_batch)]) - else: - action = np.array(self.action_space.sample()) - else: - action, state = self.policy.predict(observation, state, mask, deterministic) - return action, state - - def learn( - self, - total_timesteps: int, - callback: MaybeCallback = None, - log_interval: int = 4, - eval_env: Optional[GymEnv] = None, - eval_freq: int = -1, - n_eval_episodes: int = 5, - tb_log_name: str = "DQN", - eval_log_path: Optional[str] = None, - reset_num_timesteps: bool = True, - ) -> OffPolicyAlgorithm: - - return super(DQN, self).learn( - total_timesteps=total_timesteps, - callback=callback, - log_interval=log_interval, - eval_env=eval_env, - eval_freq=eval_freq, - n_eval_episodes=n_eval_episodes, - tb_log_name=tb_log_name, - eval_log_path=eval_log_path, - reset_num_timesteps=reset_num_timesteps, - ) - - def _excluded_save_params(self) -> List[str]: - return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"] - - def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: - state_dicts = ["policy", "policy.optimizer"] - - return state_dicts, [] diff --git a/spaces/fb700/chatglm-fitness-RLHF/src/utils/paste_pic.py b/spaces/fb700/chatglm-fitness-RLHF/src/utils/paste_pic.py deleted file mode 100644 index f9989e21e48e64f620f9b148e65fdfe806c53b14..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/src/utils/paste_pic.py +++ /dev/null @@ -1,69 +0,0 @@ -import cv2, os -import numpy as np -from tqdm import tqdm -import uuid - -from src.utils.videoio import save_video_with_watermark - -def paste_pic(video_path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop=False): - - if not os.path.isfile(pic_path): - raise ValueError('pic_path must be a valid path to video/image file') - elif pic_path.split('.')[-1] in ['jpg', 'png', 'jpeg']: - # loader for first frame - full_img = cv2.imread(pic_path) - else: - # loader for videos - video_stream = cv2.VideoCapture(pic_path) - fps = video_stream.get(cv2.CAP_PROP_FPS) - full_frames = [] - while 1: - still_reading, frame = video_stream.read() - if not still_reading: - video_stream.release() - break - break - full_img = frame - frame_h = full_img.shape[0] - frame_w = full_img.shape[1] - - video_stream = cv2.VideoCapture(video_path) - fps = video_stream.get(cv2.CAP_PROP_FPS) - crop_frames = [] - while 1: - still_reading, frame = video_stream.read() - if not still_reading: - video_stream.release() - break - crop_frames.append(frame) - - if len(crop_info) != 3: - print("you didn't crop the image") - return - else: - r_w, r_h = crop_info[0] - clx, cly, crx, cry = crop_info[1] - lx, ly, rx, ry = crop_info[2] - lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry) - # oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - # oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - - if extended_crop: - oy1, oy2, ox1, ox2 = cly, cry, clx, crx - else: - oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - - tmp_path = str(uuid.uuid4())+'.mp4' - out_tmp = cv2.VideoWriter(tmp_path, cv2.VideoWriter_fourcc(*'MP4V'), fps, (frame_w, frame_h)) - for crop_frame in tqdm(crop_frames, 'seamlessClone:'): - p = cv2.resize(crop_frame.astype(np.uint8), (ox2-ox1, oy2 - oy1)) 
- - mask = 255*np.ones(p.shape, p.dtype) - location = ((ox1+ox2) // 2, (oy1+oy2) // 2) - gen_img = cv2.seamlessClone(p, full_img, mask, location, cv2.NORMAL_CLONE) - out_tmp.write(gen_img) - - out_tmp.release() - - save_video_with_watermark(tmp_path, new_audio_path, full_video_path, watermark=False) - os.remove(tmp_path) diff --git a/spaces/fcakyon/video-classification/utils.py b/spaces/fcakyon/video-classification/utils.py deleted file mode 100644 index eb7c446be2b6449c1b529ddba0e43d034caada36..0000000000000000000000000000000000000000 --- a/spaces/fcakyon/video-classification/utils.py +++ /dev/null @@ -1,51 +0,0 @@ -from pathlib import Path -from pytube import YouTube -import numpy as np -from decord import VideoReader, cpu -import imageio - - -def download_youtube_video(url: str): - yt = YouTube(url) - - streams = yt.streams.filter(file_extension="mp4") - file_path = streams[0].download() - return file_path - - -def sample_frames_from_video_file( - file_path: str, num_frames: int = 16, frame_sampling_rate=1 -): - videoreader = VideoReader(file_path) - videoreader.seek(0) - - # sample frames - start_idx = 0 - end_idx = num_frames * frame_sampling_rate - 1 - indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64) - frames = videoreader.get_batch(indices).asnumpy() - - return frames - - -def get_num_total_frames(file_path: str): - videoreader = VideoReader(file_path) - videoreader.seek(0) - return len(videoreader) - - -def convert_frames_to_gif(frames, save_path: str = "frames.gif"): - converted_frames = frames.astype(np.uint8) - Path(save_path).parent.mkdir(parents=True, exist_ok=True) - imageio.mimsave(save_path, converted_frames, fps=8) - return save_path - - -def create_gif_from_video_file( - file_path: str, - num_frames: int = 16, - frame_sampling_rate: int = 1, - save_path: str = "frames.gif", -): - frames = sample_frames_from_video_file(file_path, num_frames, frame_sampling_rate) - return convert_frames_to_gif(frames, save_path) diff --git a/spaces/fclong/summary/fengshen/examples/zen2_finetune/ner_zen2_base_cmeee.sh b/spaces/fclong/summary/fengshen/examples/zen2_finetune/ner_zen2_base_cmeee.sh deleted file mode 100644 index a4be7221a250030db4cf1b7d157f1d6c0fd4b0f0..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/zen2_finetune/ner_zen2_base_cmeee.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=zen2_base_cmeee # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks=2 # total number of tasks across all nodes -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:2 # number of gpus per node -#SBATCH --mail-type=ALL # send email when job begins, ends or failed etc. -#SBATCH -o /cognitive_comp/lujunyu/experiments/ner_finetune/zen2_base_cmeee/%x-%j.log # output and error file name (%x=job name, %j=job id) -#SBATCH -p hgx - - -# export CUDA_VISIBLE_DEVICES='2' -export TORCH_EXTENSIONS_DIR=/cognitive_comp/lujunyu/tmp/torch_extendsions - -MODEL_NAME=zen2_base - -TASK=cmeee - -ZERO_STAGE=1 -STRATEGY=deepspeed_stage_${ZERO_STAGE} - -ROOT_DIR=/cognitive_comp/lujunyu/experiments/ner_finetune/${MODEL_NAME}_${TASK} -if [ ! -d ${ROOT_DIR} ];then - mkdir -p ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! 
-fi - -DATA_DIR=/cognitive_comp/lujunyu/data_zh/NER_Aligned/CMeEE_copy/ -PRETRAINED_MODEL_PATH=/cognitive_comp/lujunyu/pretrain_models/zen2-base-med - -CHECKPOINT_PATH=${ROOT_DIR}/ckpt/ -OUTPUT_PATH=${ROOT_DIR}/predict.json - -DATA_ARGS="\ - --data_dir $DATA_DIR \ - --train_data train.char.bio \ - --valid_data dev.char.bio \ - --test_data dev.char.bio \ - --train_batchsize 16 \ - --valid_batchsize 16 \ - --max_seq_length 512 \ - --task_name cmeee \ - " - -MODEL_ARGS="\ - --learning_rate 3e-5 \ - --weight_decay 0.1 \ - --warmup_ratio 0.01 \ - --markup bio \ - --middle_prefix I- \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor val_f1 \ - --save_top_k 3 \ - --mode max \ - --every_n_train_steps 100 \ - --save_weights_only True \ - --dirpath $CHECKPOINT_PATH \ - --filename model-{epoch:02d}-{val_f1:.4f} \ - " - -TRAINER_ARGS="\ - --max_epochs 10 \ - --gpus 2 \ - --check_val_every_n_epoch 1 \ - --val_check_interval 0.25 \ - --default_root_dir $ROOT_DIR \ - " - - -options=" \ - --pretrained_model_path $PRETRAINED_MODEL_PATH \ - --vocab_file $PRETRAINED_MODEL_PATH/vocab.txt \ - --do_lower_case \ - --output_save_path $OUTPUT_PATH \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ -" -SCRIPT_PATH=/cognitive_comp/lujunyu/Fengshenbang-LM-Git/fengshen/examples/zen2_finetune/fengshen_token_level_ft_task.py -srun python $SCRIPT_PATH $options - -# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -# python3 $SCRIPT_PATH $options -# source activate base -# singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options -# /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - diff --git a/spaces/fengmuxi/ChatGpt-Web/app/components/button.tsx b/spaces/fengmuxi/ChatGpt-Web/app/components/button.tsx deleted file mode 100644 index f93741b392f3b8f43dd2dd1e16c934041df48088..0000000000000000000000000000000000000000 --- a/spaces/fengmuxi/ChatGpt-Web/app/components/button.tsx +++ /dev/null @@ -1,45 +0,0 @@ -import * as React from "react"; - -import styles from "./button.module.scss"; - -export function IconButton(props: { - onClick?: () => void; - icon?: JSX.Element; - type?: "primary" | "danger"; - text?: string; - bordered?: boolean; - shadow?: boolean; - className?: string; - title?: string; - disabled?: boolean; -}) { - return ( - - ); -} diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Clash of Clans Builder Base Mod APK with Unlimited Resources.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Clash of Clans Builder Base Mod APK with Unlimited Resources.md deleted file mode 100644 index d4e84054bd3755ddfb53986bb995c0f27379cf2e..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Clash of Clans Builder Base Mod APK with Unlimited Resources.md +++ /dev/null @@ -1,133 +0,0 @@ - -

          Clash of Clans Builder Base Mod APK: Everything You Need to Know

          -

          If you are a fan of Clash of Clans, you might have heard of the Builder Base, a new game mode that was introduced in 2017. In this mode, you can build your own base, attack other players' bases, and compete in special events. But what if you want to enjoy the Builder Base without spending any money or waiting for long upgrades? That's where Clash of Clans Builder Base Mod APK comes in. In this article, we will tell you everything you need to know about this modded version of the game, including its features, risks, installation, and gameplay.

          -

          What is Clash of Clans Builder Base?

          -

          The Basics of Builder Base

          -

          Builder Base is a separate game mode from the main village in Clash of Clans. You can access it by tapping on the boat icon on the bottom right corner of the screen. In Builder Base, you have your own base that you can customize with different buildings, troops, and defenses. You also have a builder hall that serves as your main building and determines your builder base level.

          -

          clash of clans builder base mod apk


          Download Filehttps://gohhs.com/2uPvAC



          -

          The Benefits of Builder Base

          -

          Builder Base offers many benefits for Clash of Clans players. Some of them are:

          -
            -
          • You can play it anytime, even when your main village is under attack or shield.
          • -
          • You can earn gems by completing achievements and clearing obstacles.
          • -
          • You can unlock new troops and buildings that are exclusive to Builder Base.
          • -
          • You can participate in the Versus Battle mode, where you can attack other players' bases and earn trophies and loot.
          • -
          • You can join the Clan Games and the Season Challenges and earn rewards for both your main village and your builder base.
          • -
          -

          What is Clash of Clans Builder Base Mod APK?

          -

          The Features of Builder Base Mod APK

          -

          Clash of Clans Builder Base Mod APK is a modified version of the original game that allows you to enjoy the builder base mode with unlimited resources and features. Some of the features are:

          -
            -
          • You can get unlimited gems, gold, elixir, and dark elixir to upgrade your buildings, troops, and defenses.
          • -
          • You can get unlimited builder potions, clock tower boosts, and research potions to speed up your progress.
          • -
          • You can unlock all the builder base levels, buildings, troops, and defenses without any restrictions.
          • -
          • You can use custom mods and hacks to enhance your gameplay, such as auto-attack, auto-collect, auto-train, etc.
          • -
          • You can play on private servers with other modded players and have fun without any bans or limitations.
          • -
          -

          The Risks of Builder Base Mod APK

          -

          While Clash of Clans Builder Base Mod APK sounds tempting, it also comes with some risks that you should be aware of. Some of the risks are:

          -
            -
          • You might get banned from the official game if you use the modded version on the same device or account.
          • -
          • You might get malware or viruses on your device if you download the modded version from untrusted sources.
          • -
          • You might lose your progress or data if the modded version is not compatible with your device or the latest update.
          • -
          • You might miss out on the fun and challenge of playing the original game as it was intended by the developers.
          • -
          -

How to Download and Install Clash of Clans Builder Base Mod APK?

          -

          The Requirements for Builder Base Mod APK

          -

          Before you download and install Clash of Clans Builder Base Mod APK, you need to make sure that your device meets the following requirements:

          -
            -
• You need to have an Android device running Android version 4.1 or higher.
          • -
          • You need to have at least 100 MB of free storage space on your device.
          • -
          • You need to have a stable internet connection to download and play the modded version.
          • -
• You need to enable the unknown sources option in your device settings to allow the installation of third-party apps.
          • -
          -

          The Steps for Builder Base Mod APK Installation

          -

          After you have checked the requirements, you can follow these steps to download and install Clash of Clans Builder Base Mod APK:

          -
            -
1. Go to a trusted website that provides the download link for the modded version. For example, you can visit [this site] to get the latest version of the modded APK file.
2. Click on the download button and wait for the file to be downloaded on your device.
3. Locate the downloaded file on your device and tap on it to start the installation process.
4. Follow the instructions on the screen and grant the necessary permissions to complete the installation.
5. Launch the modded app and enjoy playing Clash of Clans Builder Base with unlimited resources and features.
          -

          How to Play Clash of Clans Builder Base Mod APK?

          -

          The Tips and Tricks for Builder Base Mod APK

          -

          Now that you have installed Clash of Clans Builder Base Mod APK, you might be wondering how to play it and make the most out of it. Here are some tips and tricks that can help you:

          -
            -
          • Use the gems wisely. Even though you have unlimited gems, you should not waste them on unnecessary things. Save them for important upgrades, such as your builder hall, your troops, and your defenses.
          • -
          • Upgrade your builder hall first. Your builder hall level determines what buildings, troops, and defenses you can unlock and use. Therefore, you should prioritize upgrading your builder hall as soon as possible.
          • -
• Balance your offense and defense. You should not focus only on one aspect of your base. You should also upgrade your offense and defense equally, so that you can attack and defend effectively.
          • -
          • Use the clock tower boost. The clock tower is a special building that can speed up everything in your builder base, such as building time, troop training time, resource production, etc. You should use it whenever it is available to save time and progress faster.
          • -
          • Experiment with different strategies. There are many ways to attack and defend in Builder Base. You should try different combinations of troops, spells, and tactics to find out what works best for you.
          • -
          -

          The Best Builder Base Layouts for Builder Base Mod APK

          -

          One of the most important factors that can affect your performance in Builder Base is your base layout. A good base layout can help you protect your resources, prevent enemy attacks, and win more trophies. Here are some examples of the best builder base layouts for Builder Base Mod APK:

          -


| Builder Hall Level | Base Layout |
| --- | --- |
| BH3 | BH3 base layout |
| BH4 | BH4 base layout |
| BH5 | BH5 base layout |
| BH6 | BH6 base layout |
| BH7 | BH7 base layout |
| BH8 | BH8 base layout |
| BH9 | BH9 base layout |
| BH10 | BH10 base layout |
          -

          Conclusion

          -

Clash of Clans Builder Base Mod APK is a great way to enjoy the builder base mode with unlimited resources and features. However, it also has some risks that you should be aware of before you download and install it. You should always use the modded version at your own discretion and responsibility. If you want to play the original game as it was meant to be played, you should stick to the official version and follow the rules. Either way, we hope that this article has helped you learn more about Clash of Clans Builder Base Mod APK and how to use it.

          -

          FAQs

          -

          Here are some frequently asked questions about Clash of Clans Builder Base Mod APK:

          -
            -
          1. Is Clash of Clans Builder Base Mod APK safe to use?
-

            Clash of Clans Builder Base Mod APK is not an official product of Supercell, the developer of Clash of Clans. Therefore, it is not guaranteed to be safe or secure. You might encounter bugs, errors, crashes, or malware on your device. You might also get banned from the official game if you use the modded version. You should always download and install the modded version from trusted sources and scan it with antivirus software before using it.

            -
2. Can I play Clash of Clans Builder Base Mod APK on iOS devices?
-

            No, Clash of Clans Builder Base Mod APK is only available for Android devices. If you want to play the modded version on iOS devices, you will need to use a jailbreak tool or an emulator, which are not recommended for security and performance reasons.

            -
3. Can I play Clash of Clans Builder Base Mod APK with my friends?
-

            Yes, you can play Clash of Clans Builder Base Mod APK with your friends, but only if they are also using the modded version. You cannot play with players who are using the official version or a different modded version. You can also join private servers where you can chat and interact with other modded players.

            -
4. Can I switch between the main village and the builder base in Clash of Clans Builder Base Mod APK?
-

            Yes, you can switch between the main village and the builder base in Clash of Clans Builder Base Mod APK, just like in the original game. However, you should note that your progress and resources in the main village will not be affected by the modded version. You will still need to follow the normal rules and limitations in the main village mode.

            -
5. Can I update Clash of Clans Builder Base Mod APK to the latest version?
-

            Yes, you can update Clash of Clans Builder Base Mod APK to the latest version, but you will need to wait for the modders to release a new version that is compatible with the latest update. You cannot update the modded version from the Google Play Store or the App Store. You will need to download and install the new version from a trusted website.

            -

          -
          -
          \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Flash Alert 2 Pro for Android and Customize Your Flash Alerts.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Flash Alert 2 Pro for Android and Customize Your Flash Alerts.md deleted file mode 100644 index 3f4787d36c6fc4ed9c07694c095defa25849df79..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Flash Alert 2 Pro for Android and Customize Your Flash Alerts.md +++ /dev/null @@ -1,85 +0,0 @@ -
          -

          Download Flash Alert 2 Pro: A Handy App for Your Phone

          -

          Do you want to get notified of incoming calls, messages, and notifications without missing them or disturbing others? Do you want to have a flashlight that can be activated by a simple gesture or a button? If yes, then you should download Flash Alert 2 Pro, a useful app that turns your phone's flash into a notification tool and a flashlight. In this article, we will tell you what Flash Alert 2 Pro is, what features it has, how to download it, how to use it, and what are its pros and cons.

          -

          What is Flash Alert 2 Pro?

          -

          Flash Alert 2 Pro is an app that activates the flash in your phone when you get a call, a message, or a notification from any app. It allows you to customize the flash settings, such as the frequency, the duration, and the brightness. You can also choose different notification modes, such as normal mode, silent mode, or vibrate mode. You can also enable battery saving mode to reduce the power consumption of the flash. Moreover, you can also use Flash Alert 2 Pro as a flashlight by shaking your phone or pressing the power button three times.

          -

          download flash alert 2 pro


          DOWNLOAD 🌟 https://gohhs.com/2uPqWC



          -

          Features of Flash Alert 2 Pro

          -

          Customizable flash settings

          -

With Flash Alert 2 Pro, you can adjust the flash settings to your preferences: the flash frequency from 1 to 10 flashes per second, the flash duration from 0.1 to 2 seconds, and the flash brightness from low to high.
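The app's own implementation is not public, so purely as an illustration, here is a minimal Python sketch of how the settings described above could be modeled and validated. The `FlashSettings` class, its field names, and the low/high brightness values are assumptions made for the sketch, not code taken from Flash Alert 2 Pro; only the numeric ranges come from the paragraph above.

```python
from dataclasses import dataclass


@dataclass
class FlashSettings:
    """Hypothetical model of the flash-alert settings (not the app's real code)."""
    frequency_hz: int = 5       # flashes per second, allowed range 1..10
    duration_s: float = 0.5     # flash duration in seconds, allowed range 0.1..2.0
    brightness: str = "high"    # assumed to be either "low" or "high"

    def __post_init__(self):
        if not 1 <= self.frequency_hz <= 10:
            raise ValueError("frequency must be between 1 and 10 flashes per second")
        if not 0.1 <= self.duration_s <= 2.0:
            raise ValueError("duration must be between 0.1 and 2.0 seconds")
        if self.brightness not in ("low", "high"):
            raise ValueError("brightness must be 'low' or 'high'")

    def blink_count(self) -> int:
        # How many on/off cycles one alert would produce at these settings.
        return max(1, round(self.frequency_hz * self.duration_s))
```

For example, `FlashSettings(frequency_hz=10, duration_s=2.0).blink_count()` would give 20 blinks for a single alert.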

          -

          Notification modes

          -

Flash Alert 2 Pro also lets you choose different notification modes for different situations: normal mode activates the flash and the sound when you get a call or a notification, silent mode activates only the flash, and vibrate mode activates the flash and the vibration.

          -

          Battery saving mode

          -

          If you are worried about the battery drain caused by the flash, you can enable battery saving mode in Flash Alert 2 Pro. This mode will turn off the flash when your phone's battery level is below a certain percentage. You can set the battery level from 5% to 50%. This way, you can save your battery and still get notified of important calls and notifications.
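The exact rule the app applies is not documented, so the helper below is only a hedged sketch of the battery-saving behaviour described in this paragraph; the function name and parameters are invented for illustration, while the 5%–50% threshold range comes from the text above.

```python
def should_flash(battery_percent: int, saver_threshold: int, saver_enabled: bool = True) -> bool:
    """Hypothetical check: suppress flash alerts when battery saving mode kicks in."""
    if not 5 <= saver_threshold <= 50:
        raise ValueError("threshold must be between 5 and 50 percent")
    if saver_enabled and battery_percent < saver_threshold:
        return False  # below the user's threshold: battery saving mode turns the flash off
    return True


# Example: with a 20% threshold, an alert at 12% battery would not fire the flash.
assert should_flash(battery_percent=12, saver_threshold=20) is False
assert should_flash(battery_percent=80, saver_threshold=20) is True
```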

          -

          Emergency alerts

          -

          Another feature of Flash Alert 2 Pro is that it can send you emergency alerts in case of disasters or emergencies. You can enable this feature in the settings and choose which types of alerts you want to receive, such as earthquake alerts, tsunami alerts, or fire alerts. When you receive an emergency alert, your phone's flash will blink rapidly and continuously until you dismiss it.

          -

          How to download Flash Alert 2 Pro?

          -

          From Google Play Store

          -

          The easiest way to download Flash Alert 2 Pro is from Google Play Store. You can search for "Flash Alert 2 Pro" in the store and tap on the install button. The app will be downloaded and installed on your phone automatically.

          -

          From Uptodown website

          -

          If you cannot access Google Play Store or if you want to download an older version of Flash Alert 2 Pro, you can download it from Uptodown website. Uptodown is a website that offers free and safe downloads of Android apps and games. You can visit the website and search for "Flash Alert 2 Pro" and choose the version you want to download. You will get a file with the extension .apk, which you can transfer to your phone and install manually.

          -

          How to use Flash Alert 2 Pro?

          -

          Enable flash alerts

          -

          After you download and install Flash Alert 2 Pro, you need to enable flash alerts in the app. You can open the app and tap on the switch button at the top right corner. You will see a message asking you to grant permission for the app to access your phone's camera and flash. You need to allow this permission for the app to work properly.

          -

          Choose apps to receive flash alerts

          -

          Next, you need to choose which apps you want to receive flash alerts from. You can tap on the "App List" button at the bottom of the app and select the apps you want. You can also tap on the "All Apps" button to enable flash alerts for all apps on your phone.

          -

          flash alert 2 pro apk free download
          -how to install flash alert 2 pro on android
          -flash alert 2 pro latest version
          -flash alert 2 pro for samsung
          -flash alert 2 pro mod apk
          -flash alert 2 pro review
          -flash alert 2 pro features
          -flash alert 2 pro vs flash alerts 2
          -flash alert 2 pro app store
          -flash alert 2 pro for iphone
          -flash alert 2 pro settings
          -flash alert 2 pro not working
          -flash alert 2 pro alternative
          -flash alert 2 pro premium apk
          -flash alert 2 pro for huawei
          -flash alert 2 pro for oppo
          -flash alert 2 pro for vivo
          -flash alert 2 pro for xiaomi
          -flash alert 2 pro for nokia
          -flash alert 2 pro for lg
          -flash alert 2 pro for sony
          -flash alert 2 pro for motorola
          -flash alert 2 pro for oneplus
          -flash alert 2 pro for realme
          -flash alert 2 pro for lenovo
          -flash alert 2 pro for asus
          -flash alert 2 pro for google pixel
          -flash alert 2 pro for android tv
          -flash alert 2 pro for android wear
          -flash alert 2 pro for android auto
          -download flash alerts on call and sms apk
          -download led flashlight alerts apk
          -download color flashlight alerts apk
          -download call flashlight - blink led on call apk
          -download flashlight notification - led torch light apk
          -download flashlight alerts on call and sms - led torch apk
          -download super bright led flashlight - call screen light apk
          -download flashlight - led light & call screen apk
          -download flashlight - super bright & super light apk
          -download flashlight - bright led light & strobe light apk

          -

          Adjust flash frequency and duration

          -

          Finally, you can adjust the flash frequency and duration for each app. You can tap on the app name in the list and slide the bars to change the settings. You can also tap on the "Test" button to see how the flash will look like when you get a notification from that app.

          -

          Pros and cons of Flash Alert 2 Pro

          -

          Pros

          -
            -
• Flash Alert 2 Pro is a handy app that helps you get notified of important calls, messages, and notifications without missing them or disturbing others.
• Flash Alert 2 Pro is easy to use and customize. You can change the flash settings, choose different notification modes, enable battery saving mode, and use it as a flashlight.
• Flash Alert 2 Pro also supports emergency alerts that can warn you of disasters or emergencies in your area.
          -

          Cons

          -
            -
• Flash Alert 2 Pro may not work on some devices or with some apps due to compatibility issues.
• Flash Alert 2 Pro may cause battery drain or overheating if used excessively or with high brightness settings.
• Flash Alert 2 Pro may not be suitable for people who are sensitive to flashing lights or who have epilepsy.
          -

          Conclusion

          -

          In conclusion, Flash Alert 2 Pro is a useful app that turns your phone's flash into a notification tool and a flashlight. It has many features that allow you to customize the flash settings, choose different notification modes, enable battery saving mode, and receive emergency alerts. However, it also has some drawbacks that you should be aware of before downloading it. If you are looking for an app that can help you get notified of incoming calls, messages, and notifications without missing them or disturbing others, you should download Flash Alert 2 Pro from Google Play Store or Uptodown website.

FAQs

Q: How much does Flash Alert 2 Pro cost?
A: Flash Alert 2 Pro is a free app that does not require any payment or subscription.

Q: Is Flash Alert 2 Pro safe to download?
A: Flash Alert 2 Pro is safe to download from Google Play Store or the Uptodown website. However, you should always check the permissions and reviews before installing any app on your phone.

Q: How can I turn off Flash Alert 2 Pro?
A: You can turn off Flash Alert 2 Pro by tapping on the switch button at the top right corner of the app. You can also disable flash alerts for specific apps by tapping on the app name in the list and sliding the bar to zero.

Q: Can I use Flash Alert 2 Pro as a flashlight?
A: Yes, you can use Flash Alert 2 Pro as a flashlight by shaking your phone or pressing the power button three times. You can also change the flashlight settings in the app.

Q: What are some alternatives to Flash Alert 2 Pro?
A: Some alternatives to Flash Alert 2 Pro are Flash Alerts on Call and SMS, LED Blinker Notifications Lite, and Color Flash Launcher. These apps offer features similar to Flash Alert 2 Pro.

          -
          -
          \ No newline at end of file diff --git a/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h b/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h deleted file mode 100644 index b2b88e8c46f19b6db0933163e57ccdb51180f517..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h +++ /dev/null @@ -1,35 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once -#include - -namespace groundingdino { - -at::Tensor -ms_deform_attn_cpu_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step); - -std::vector -ms_deform_attn_cpu_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step); - -} // namespace groundingdino diff --git a/spaces/fffiloni/Video-Matting-Anything/segment-anything/segment_anything/__init__.py b/spaces/fffiloni/Video-Matting-Anything/segment-anything/segment_anything/__init__.py deleted file mode 100644 index 34383d83f5e76bc801f31b20e5651e383be348b6..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Video-Matting-Anything/segment-anything/segment_anything/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from .build_sam import ( - build_sam, - build_sam_vit_h, - build_sam_vit_l, - build_sam_vit_b, - sam_model_registry, -) -from .predictor import SamPredictor -from .automatic_mask_generator import SamAutomaticMaskGenerator diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/express/lib/middleware/query.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/express/lib/middleware/query.js deleted file mode 100644 index 7e9166947aff3be10f7ffec3771c92581ce211da..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/express/lib/middleware/query.js +++ /dev/null @@ -1,47 +0,0 @@ -/*! - * express - * Copyright(c) 2009-2013 TJ Holowaychuk - * Copyright(c) 2013 Roman Shtylman - * Copyright(c) 2014-2015 Douglas Christopher Wilson - * MIT Licensed - */ - -'use strict'; - -/** - * Module dependencies. 
- */ - -var merge = require('utils-merge') -var parseUrl = require('parseurl'); -var qs = require('qs'); - -/** - * @param {Object} options - * @return {Function} - * @api public - */ - -module.exports = function query(options) { - var opts = merge({}, options) - var queryparse = qs.parse; - - if (typeof options === 'function') { - queryparse = options; - opts = undefined; - } - - if (opts !== undefined && opts.allowPrototypes === undefined) { - // back-compat for qs module - opts.allowPrototypes = true; - } - - return function query(req, res, next){ - if (!req.query) { - var val = parseUrl(req).query; - req.query = queryparse(val, opts); - } - - next(); - }; -}; diff --git a/spaces/flax-community/koclip/executables/embed_images.py b/spaces/flax-community/koclip/executables/embed_images.py deleted file mode 100644 index e17fda4fe0085f3938de253b0d56a39bd02500fd..0000000000000000000000000000000000000000 --- a/spaces/flax-community/koclip/executables/embed_images.py +++ /dev/null @@ -1,57 +0,0 @@ -import argparse -import csv -import os - -import jax.numpy as jnp -from jax import jit -from PIL import Image -from tqdm import tqdm - -from config import MODEL_LIST -from utils import load_model - - -def main(args): - root = args.image_path - files = list(os.listdir(root)) - for f in files: - assert f[-4:] == ".jpg" - for model_name in MODEL_LIST: - model, processor = load_model(f"koclip/{model_name}") - with tqdm(total=len(files)) as pbar: - for counter in range(0, len(files), args.batch_size): - images = [] - image_ids = [] - for idx in range(counter, min(len(files), counter + args.batch_size)): - file_ = files[idx] - image = Image.open(os.path.join(root, file_)).convert("RGB") - images.append(image) - image_ids.append(file_) - - pbar.update(args.batch_size) - try: - inputs = processor( - text=[""], images=images, return_tensors="jax", padding=True - ) - except: - print(image_ids) - break - inputs["pixel_values"] = jnp.transpose( - inputs["pixel_values"], axes=[0, 2, 3, 1] - ) - features = model(**inputs).image_embeds - with open(os.path.join(args.out_path, f"{model_name}.tsv"), "a+") as f: - writer = csv.writer(f, delimiter="\t") - for image_id, feature in zip(image_ids, features): - writer.writerow( - [image_id, ",".join(map(lambda x: str(x), feature))] - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--batch_size", default=16) - parser.add_argument("--image_path", default="images") - parser.add_argument("--out_path", default="features") - args = parser.parse_args() - main(args) diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/register.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/register.py deleted file mode 100644 index 5c7fff1a53a8318f085a7b55439f2d34411817cd..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/register.py +++ /dev/null @@ -1,25 +0,0 @@ -from gym.envs.registration import register as gym_register - -env_list = [] - -def register( - id, - entry_point, - reward_threshold=0.95, - kwargs={} -): - assert id.startswith("MiniGrid-") or id.startswith("SocialAI-") - assert id not in env_list - - # print("Registered:", id) - - # Register the environment with OpenAI gym - gym_register( - id=id, - entry_point=entry_point, - reward_threshold=reward_threshold, - kwargs=kwargs - ) - - # Add the environment to the set - env_list.append(id) diff --git "a/spaces/frncscp/Patacotron/Patacotr\303\263n\342\204\242.py" 
"b/spaces/frncscp/Patacotron/Patacotr\303\263n\342\204\242.py" deleted file mode 100644 index 0eaf7e23464f134117461fcb14256e7027e008f2..0000000000000000000000000000000000000000 --- "a/spaces/frncscp/Patacotron/Patacotr\303\263n\342\204\242.py" +++ /dev/null @@ -1,49 +0,0 @@ -import streamlit as st - -st.set_page_config( - page_title = 'Patacotrón', - layout= 'centered', - initial_sidebar_state = 'collapsed', - menu_items = { - "About" : 'Proyecto ideado para la investigación de "Clasificación de imágenes de una sola clase con algortimos de Inteligencia Artificial".', - "Report a Bug" : 'https://docs.google.com/forms/d/e/1FAIpQLScH0ZxAV8aSqs7TPYi86u0nkxvQG3iuHCStWNB-BoQnSW2V0g/viewform?usp=sf_link' - }, - page_icon = "https://e.snmc.io/i/600/w/8ab031c2051e37bb60969dacfba220be/9961728" -) -link = '[Reportar un Bug](https://docs.google.com/forms/d/e/1FAIpQLScH0ZxAV8aSqs7TPYi86u0nkxvQG3iuHCStWNB-BoQnSW2V0g/viewform?usp=sf_link)' - -st.title("Patacotrón™") - - -with st.sidebar: - st.write("contact@patacotron.tech") - - -st.markdown( - f""" - ### ¿Qué es? - Patacotrón es un proyecto desarrollado simultáneamente con una investigación sobre **Clasificación de imágenes de una sola clase con algoritmos de Inteligencia - Artificial**. - - ### ¿Para qué sirve? - Su función es clasificar patacones como clase positiva, y objetos anómalos como clase - negativa, esta página permite usar diferentes modelos (combinándolos para obtener el promedio - de varias predicciones) y reportar errores, además de entender cómo se clasificaron según - su eficacia. - - ### ¿Cómo se usa? - En la **esquina superior izquierda** hay un botón para desplegar el menú de opciones. -""" -) - - -d, e, f = st.columns(3) - -with d: - pass - -with e: - st.image('patacones.gif') - -with f: - pass \ No newline at end of file diff --git a/spaces/g4f/freegpt-webui/client/html/index.html b/spaces/g4f/freegpt-webui/client/html/index.html deleted file mode 100644 index 687860682a6588303ee7d7b69876b31db5297ccf..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/client/html/index.html +++ /dev/null @@ -1,119 +0,0 @@ - - - - - - - - - - - - - - - - - - FreeGPT - - - -
          - -
          -
          - -
          -
          -
          -
          - -
          - -
          -
          -
          -
          -
          -
          -
          - -
          -
          - -
          -
          -
          - - - Web Access -
          -
          -
          -
          -
          -
          - -
          - - - - - - - - - - - diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/apis/test.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/apis/test.py deleted file mode 100644 index e574eb7da04f09a59cf99ff953c36468ae87a326..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/apis/test.py +++ /dev/null @@ -1,238 +0,0 @@ -import os.path as osp -import pickle -import shutil -import tempfile - -import annotator.uniformer.mmcv as mmcv -import numpy as np -import torch -import torch.distributed as dist -from annotator.uniformer.mmcv.image import tensor2imgs -from annotator.uniformer.mmcv.runner import get_dist_info - - -def np2tmp(array, temp_file_name=None): - """Save ndarray to local numpy file. - - Args: - array (ndarray): Ndarray to save. - temp_file_name (str): Numpy file name. If 'temp_file_name=None', this - function will generate a file name with tempfile.NamedTemporaryFile - to save ndarray. Default: None. - - Returns: - str: The numpy file name. - """ - - if temp_file_name is None: - temp_file_name = tempfile.NamedTemporaryFile( - suffix='.npy', delete=False).name - np.save(temp_file_name, array) - return temp_file_name - - -def single_gpu_test(model, - data_loader, - show=False, - out_dir=None, - efficient_test=False, - opacity=0.5): - """Test with single GPU. - - Args: - model (nn.Module): Model to be tested. - data_loader (utils.data.Dataloader): Pytorch data loader. - show (bool): Whether show results during inference. Default: False. - out_dir (str, optional): If specified, the results will be dumped into - the directory to save output results. - efficient_test (bool): Whether save the results as local numpy files to - save CPU memory during evaluation. Default: False. - opacity(float): Opacity of painted segmentation map. - Default 0.5. - Must be in (0, 1] range. - Returns: - list: The prediction results. - """ - - model.eval() - results = [] - dataset = data_loader.dataset - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, **data) - - if show or out_dir: - img_tensor = data['img'][0] - img_metas = data['img_metas'][0].data[0] - imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) - assert len(imgs) == len(img_metas) - - for img, img_meta in zip(imgs, img_metas): - h, w, _ = img_meta['img_shape'] - img_show = img[:h, :w, :] - - ori_h, ori_w = img_meta['ori_shape'][:-1] - img_show = mmcv.imresize(img_show, (ori_w, ori_h)) - - if out_dir: - out_file = osp.join(out_dir, img_meta['ori_filename']) - else: - out_file = None - - model.module.show_result( - img_show, - result, - palette=dataset.PALETTE, - show=show, - out_file=out_file, - opacity=opacity) - - if isinstance(result, list): - if efficient_test: - result = [np2tmp(_) for _ in result] - results.extend(result) - else: - if efficient_test: - result = np2tmp(result) - results.append(result) - - batch_size = len(result) - for _ in range(batch_size): - prog_bar.update() - return results - - -def multi_gpu_test(model, - data_loader, - tmpdir=None, - gpu_collect=False, - efficient_test=False): - """Test model with multiple gpus. - - This method tests model with multiple gpus and collects the results - under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' - it encodes results to gpu tensors and use gpu communication for results - collection. 
On cpu mode it saves the results on different gpus to 'tmpdir' - and collects them by the rank 0 worker. - - Args: - model (nn.Module): Model to be tested. - data_loader (utils.data.Dataloader): Pytorch data loader. - tmpdir (str): Path of directory to save the temporary results from - different gpus under cpu mode. - gpu_collect (bool): Option to use either gpu or cpu to collect results. - efficient_test (bool): Whether save the results as local numpy files to - save CPU memory during evaluation. Default: False. - - Returns: - list: The prediction results. - """ - - model.eval() - results = [] - dataset = data_loader.dataset - rank, world_size = get_dist_info() - if rank == 0: - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - - if isinstance(result, list): - if efficient_test: - result = [np2tmp(_) for _ in result] - results.extend(result) - else: - if efficient_test: - result = np2tmp(result) - results.append(result) - - if rank == 0: - batch_size = data['img'][0].size(0) - for _ in range(batch_size * world_size): - prog_bar.update() - - # collect results from all ranks - if gpu_collect: - results = collect_results_gpu(results, len(dataset)) - else: - results = collect_results_cpu(results, len(dataset), tmpdir) - return results - - -def collect_results_cpu(result_part, size, tmpdir=None): - """Collect results with CPU.""" - rank, world_size = get_dist_info() - # create a tmp dir if it is not specified - if tmpdir is None: - MAX_LEN = 512 - # 32 is whitespace - dir_tensor = torch.full((MAX_LEN, ), - 32, - dtype=torch.uint8, - device='cuda') - if rank == 0: - tmpdir = tempfile.mkdtemp() - tmpdir = torch.tensor( - bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') - dir_tensor[:len(tmpdir)] = tmpdir - dist.broadcast(dir_tensor, 0) - tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() - else: - mmcv.mkdir_or_exist(tmpdir) - # dump the part result to the dir - mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank))) - dist.barrier() - # collect all parts - if rank != 0: - return None - else: - # load results of all parts from tmp dir - part_list = [] - for i in range(world_size): - part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i)) - part_list.append(mmcv.load(part_file)) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - # remove tmp dir - shutil.rmtree(tmpdir) - return ordered_results - - -def collect_results_gpu(result_part, size): - """Collect results with GPU.""" - rank, world_size = get_dist_info() - # dump result part to tensor with pickle - part_tensor = torch.tensor( - bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') - # gather all result part tensor shape - shape_tensor = torch.tensor(part_tensor.shape, device='cuda') - shape_list = [shape_tensor.clone() for _ in range(world_size)] - dist.all_gather(shape_list, shape_tensor) - # padding result part tensor to max length - shape_max = torch.tensor(shape_list).max() - part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') - part_send[:shape_tensor[0]] = part_tensor - part_recv_list = [ - part_tensor.new_zeros(shape_max) for _ in range(world_size) - ] - # gather all result part - dist.all_gather(part_recv_list, part_send) - - if rank == 0: - part_list = [] - for recv, shape in zip(part_recv_list, 
shape_list): - part_list.append( - pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - return ordered_results diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Fotonovela Del Mono Mario Descargar [HOT].md b/spaces/gotiQspiryo/whisper-ui/examples/Fotonovela Del Mono Mario Descargar [HOT].md deleted file mode 100644 index 44a480629f0a5b23f6866c06e72dd295d9f7c6a2..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Fotonovela Del Mono Mario Descargar [HOT].md +++ /dev/null @@ -1,7 +0,0 @@ -
          -

          Les presentamos una nueva Fotonovela que sin duda nos llevaran por todo Azarcon. Esta Fotonovela comenzará a librestrecer el prsentador, y será super esta creación en 3D por nosotros en Comicsporno. Sera la m ultima Fotonovela en esta preciosa prela en la nuestra voz de: El hombre del cuento- Gimbel.- El parador de los soles.- Habitaciones de muerte. - Mar de Amor.- Marisimulo.- Mi vida hasta aqui.- Motos y Moradas.- Olayada.- Otra Nueva y Feliz.- Raymos de sol.- Tropezando lentes.- Y el Especial de Casa en unos das noches, con modelo, seguramente el mejor que he hecho en este blog -como decimos alla - de Fotonovelas xxx de incesto real los cuales podran disfrutar a todo color en distintos formatos en nuestra Comicsporno.

          -

          fotonovela del mono mario descargar


          Download File >>> https://urlgoal.com/2uyNyl



          -

          El área de votaciones ha terminado, esperamos poder darlo a conocer mañana, y gracias por todo amigo y compañero de todas las versiones de linux.- Fotonovela: D&D, Pit y Pat on TSR-145.- Lucas Nino: El sexo es un asunto de mejoras.- RCTV: Quieren entender al mundo por el mundo.- Leonel Pardo: Hacer a las pasiones de los fanáticos su imperio.- Agustin Piedra: El Plan Cyclone y los ladrones.- Quirino E. Barreto: El cinematografo en el colmado.- Igor Barreto: Voz profesional de acerasia.- Hugo Vera: El ganador de los toros gallegos.- Moreno Barreto: La tarde en que se rompe el cristal de la naturaleza.

          -

          Fotonovela del mono el cual nombre se deduce de un ciclo de claro de luna y el cual pertenece a teoriamona de teorias de cine y parece ser que está presente en todas las telas y para todas las narrativas e esta funciona como una fuerte cruz del sonido de la narrativa con la imagen de la imagen. La interpretacio se usa para describir

          -
          -
          \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Free Download Cyber Law In India By Farooq Ahmad Pdf Readerl Discover the Latest Trends and Developments in Cybersecurity and Cybercrime.md b/spaces/gotiQspiryo/whisper-ui/examples/Free Download Cyber Law In India By Farooq Ahmad Pdf Readerl Discover the Latest Trends and Developments in Cybersecurity and Cybercrime.md deleted file mode 100644 index 8c9c7016cc2bd45c3f1e906a24befa6206fdf53f..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Free Download Cyber Law In India By Farooq Ahmad Pdf Readerl Discover the Latest Trends and Developments in Cybersecurity and Cybercrime.md +++ /dev/null @@ -1,6 +0,0 @@ -

          HACK Wondershare Dr.Fone Toolkit For Iso Pc Mac 10.8.9.86 FULL Crackl


          Download File ✸✸✸ https://urlgoal.com/2uyMfY



          -
          -
          -

          diff --git a/spaces/gradio/HuBERT/examples/simultaneous_translation/README.md b/spaces/gradio/HuBERT/examples/simultaneous_translation/README.md deleted file mode 100644 index 62a005e0ec6f15af9015d335e34b45df6ed89b6c..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/simultaneous_translation/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Simultaneous Translation -Examples of simultaneous translation in fairseq -- [English-to-Japanese text-to-text wait-k model](docs/enja-waitk.md) -- [English-to-Germen text-to-text monotonic multihead attention model](docs/ende-mma.md) -- [English-to-Germen speech-to-text simultaneous translation model](../speech_to_text/docs/simulst_mustc_example.md) diff --git a/spaces/gradio/HuBERT/fairseq/tasks/hubert_pretraining.py b/spaces/gradio/HuBERT/fairseq/tasks/hubert_pretraining.py deleted file mode 100644 index a63f2f6ef8ff4953e6ca424bc4413fe55695b273..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/tasks/hubert_pretraining.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - -import logging -import os -import sys -from typing import Dict, List, Optional, Tuple - -import numpy as np - -from dataclasses import dataclass, field -from fairseq.data import Dictionary, HubertDataset -from fairseq.dataclass.configs import FairseqDataclass -from fairseq.tasks import register_task -from fairseq.tasks.fairseq_task import FairseqTask -from omegaconf import MISSING - -logger = logging.getLogger(__name__) - - -class LabelEncoder(object): - def __init__(self, dictionary: Dictionary) -> None: - self.dictionary = dictionary - - def __call__(self, label: str) -> List[str]: - return self.dictionary.encode_line( - label, append_eos=False, add_if_not_exist=False, - ) - - -@dataclass -class HubertPretrainingConfig(FairseqDataclass): - data: str = field( - default=MISSING, metadata={"help": "path to data directory"} - ) - fine_tuning: bool = field( - default=False, metadata={"help": "set to true if fine-tuning Hubert"} - ) - labels: List[str] = field( - default_factory=lambda: ["ltr"], - metadata={ - "help": ( - "extension of the label files to load, frame-level labels for" - " pre-training, and sequence-level label for fine-tuning" - ) - }, - ) - label_dir: Optional[str] = field( - default=None, - metadata={ - "help": "if set, looks for labels in this directory instead", - }, - ) - label_rate: int = field( - default=-1, - metadata={"help": "label frame rate. -1 for sequence label"}, - ) - sample_rate: int = field( - default=16_000, - metadata={ - "help": "target sample rate. 
audio files will be up/down " - "sampled to this rate" - }, - ) - normalize: bool = field( - default=False, - metadata={ - "help": "if set, normalizes input to have 0 mean and unit variance" - }, - ) - enable_padding: bool = field( - default=False, - metadata={"help": "pad shorter samples instead of cropping"}, - ) - max_sample_size: Optional[int] = field( - default=None, - metadata={"help": "max sample size to crop to for batching"}, - ) - min_sample_size: Optional[int] = field( - default=None, - metadata={"help": "min sample size to crop to for batching"}, - ) - single_target: Optional[bool] = field( - default=False, - metadata={ - "help": "if set, AddTargetDatasets outputs same keys " - "as AddTargetDataset" - }, - ) - random_crop: Optional[bool] = field( - default=True, - metadata={"help": "always crop from the beginning if false"}, - ) - pad_audio: Optional[bool] = field( - default=False, - metadata={"help": "pad audio to the longest one in the batch if true"}, - ) - - -@register_task("hubert_pretraining", dataclass=HubertPretrainingConfig) -class HubertPretrainingTask(FairseqTask): - - cfg: HubertPretrainingConfig - - def __init__( - self, - cfg: HubertPretrainingConfig, - ) -> None: - super().__init__(cfg) - - logger.info(f"current directory is {os.getcwd()}") - logger.info(f"HubertPretrainingTask Config {cfg}") - - self.cfg = cfg - self.fine_tuning = cfg.fine_tuning - - if cfg.fine_tuning: - self.state.add_factory("target_dictionary", lambda: self.load_dictionaries) - else: - self.state.add_factory("dictionaries", lambda: self.load_dictionaries) - - self._source_dictionary = None - - self.blank_symbol = "" - - @property - def source_dictionary(self) -> Optional[Dictionary]: - return self._source_dictionary - - @property - def target_dictionary(self) -> Optional[Dictionary]: - return self.state.target_dictionary - - @property - def dictionaries(self) -> List[Dictionary]: - return self.state.dictionaries - - @classmethod - def setup_task( - cls, cfg: HubertPretrainingConfig, **kwargs - ) -> "HubertPretrainingTask": - return cls(cfg) - - def load_dictionaries(self): - label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir - dictionaries = [Dictionary.load(f"{label_dir}/dict.{label}.txt") for label in self.cfg.labels] - return dictionaries[0] if self.cfg.fine_tuning else dictionaries - - def get_label_dir(self) -> str: - if self.cfg.label_dir is None: - return self.cfg.data - return self.cfg.label_dir - - def load_dataset(self, split: str, **kwargs) -> None: - manifest = f"{self.cfg.data}/{split}.tsv" - dicts = [self.target_dictionary] if self.cfg.fine_tuning else self.dictionaries - pad_list = [dict.pad() for dict in dicts] - eos_list = [dict.eos() for dict in dicts] - procs = [LabelEncoder(dict) for dict in dicts] - paths = [ - f"{self.get_label_dir()}/{split}.{l}" for l in self.cfg.labels - ] - - # hubert v1: pad_audio=True, random_crop=False; - self.datasets[split] = HubertDataset( - manifest, - sample_rate=self.cfg.sample_rate, - label_paths=paths, - label_rates=self.cfg.label_rate, - pad_list=pad_list, - eos_list=eos_list, - label_processors=procs, - max_keep_sample_size=None, - min_keep_sample_size=self.cfg.min_sample_size, - max_sample_size=self.cfg.max_sample_size, - pad_audio=self.cfg.pad_audio, - normalize=self.cfg.normalize, - store_labels=False, - random_crop=self.cfg.random_crop, - single_target=self.cfg.single_target, - ) - - def max_positions(self) -> Tuple[int, int]: - return (sys.maxsize, sys.maxsize) - - def filter_indices_by_size( - self, 
indices: np.array, *args, **kwargs - ) -> np.array: - return indices diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/op/upfirdn2d.py b/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/op/upfirdn2d.py deleted file mode 100644 index 02fc25af780868d9b883631eb6b03a25c225d745..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/op/upfirdn2d.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -import torch -from torch.nn import functional as F - - -module_path = os.path.dirname(__file__) - - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = upfirdn2d_native( - input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1] - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) \ No newline at end of file diff --git a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/torch_utils/models.py b/spaces/gyugnsu/DragGan-Inversion/stylegan_human/torch_utils/models.py deleted file mode 100644 index 936e16ad992fce3faf868d974274b5cd7c6a6be9..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/torch_utils/models.py +++ /dev/null @@ -1,770 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. 
- -# https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py - -import math -import random -import functools -import operator - -import torch -from torch import nn -from torch.nn import functional as F -import torch.nn.init as init -from torch.autograd import Function - -from .op_edit import FusedLeakyReLU, fused_leaky_relu, upfirdn2d - - -class PixelNorm(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, input): - return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) - - -def make_kernel(k): - k = torch.tensor(k, dtype=torch.float32) - if k.ndim == 1: - k = k[None, :] * k[:, None] - k /= k.sum() - return k - - -class Upsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) * (factor ** 2) - self.register_buffer("kernel", kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=self.factor, - down=1, pad=self.pad) - return out - - -class Downsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) - self.register_buffer("kernel", kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=1, - down=self.factor, pad=self.pad) - return out - - -class Blur(nn.Module): - def __init__(self, kernel, pad, upsample_factor=1): - super().__init__() - - kernel = make_kernel(kernel) - - if upsample_factor > 1: - kernel = kernel * (upsample_factor ** 2) - - self.register_buffer("kernel", kernel) - - self.pad = pad - - def forward(self, input): - out = upfirdn2d(input, self.kernel, pad=self.pad) - return out - - -class EqualConv2d(nn.Module): - def __init__( - self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True - ): - super().__init__() - - self.weight = nn.Parameter( - torch.randn(out_channel, in_channel, kernel_size, kernel_size) - ) - self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) - - self.stride = stride - self.padding = padding - - if bias: - self.bias = nn.Parameter(torch.zeros(out_channel)) - - else: - self.bias = None - - def forward(self, input): - out = F.conv2d( - input, - self.weight * self.scale, - bias=self.bias, - stride=self.stride, - padding=self.padding, - ) - return out - - def __repr__(self): - return ( - f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}," - f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})" - ) - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None - ): - super().__init__() - - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - - if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - else: - self.bias = None - - self.activation = activation - - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def forward(self, input): - if self.activation: - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul) - else: - out = F.linear( - input, self.weight * self.scale, bias=self.bias * self.lr_mul - ) - return out - - def __repr__(self): - return ( - f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})" - ) - - -class 
ScaledLeakyReLU(nn.Module): - def __init__(self, negative_slope=0.2): - super().__init__() - self.negative_slope = negative_slope - - def forward(self, input): - out = F.leaky_relu(input, negative_slope=self.negative_slope) - return out * math.sqrt(2) - - -class ModulatedConv2d(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - demodulate=True, - upsample=False, - downsample=False, - blur_kernel=[1, 3, 3, 1], - ): - super().__init__() - - self.eps = 1e-8 - self.kernel_size = kernel_size - self.in_channel = in_channel - self.out_channel = out_channel - self.upsample = upsample - self.downsample = downsample - - if upsample: - factor = 2 - p = (len(blur_kernel) - factor) - (kernel_size - 1) - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 + 1 - self.blur = Blur(blur_kernel, pad=( - pad0, pad1), upsample_factor=factor) - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - self.blur = Blur(blur_kernel, pad=(pad0, pad1)) - - fan_in = in_channel * kernel_size ** 2 - self.scale = 1 / math.sqrt(fan_in) - self.padding = kernel_size // 2 - self.weight = nn.Parameter( - torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) - ) - self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) - self.demodulate = demodulate - - def __repr__(self): - return ( - f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, " - f"upsample={self.upsample}, downsample={self.downsample})" - ) - - def forward(self, input, style): - batch, in_channel, height, width = input.shape - - style = self.modulation(style).view(batch, 1, in_channel, 1, 1) - weight = self.scale * self.weight * style - - if self.demodulate: - demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) - weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) - - weight = weight.view( - batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - - if self.upsample: - input = input.view(1, batch * in_channel, height, width) - weight = weight.view( - batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - weight = weight.transpose(1, 2).reshape( - batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size - ) - out = F.conv_transpose2d( - input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - out = self.blur(out) - - elif self.downsample: - input = self.blur(input) - _, _, height, width = input.shape - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - else: - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=self.padding, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - return out - - -class NoiseInjection(nn.Module): - def __init__(self): - super().__init__() - self.weight = nn.Parameter(torch.zeros(1)) - - def forward(self, image, noise=None): - if noise is None: - batch, _, height, width = image.shape - noise = image.new_empty(batch, 1, height, width).normal_() - return image + self.weight * noise - - -class ConstantInput(nn.Module): - def __init__(self, channel, size=4): - super().__init__() - self.input = nn.Parameter(torch.randn(1, channel, size, size // 2)) - - def 
forward(self, input): - batch = input.shape[0] - out = self.input.repeat(batch, 1, 1, 1) - return out - - -class StyledConv(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=False, - blur_kernel=[1, 3, 3, 1], - demodulate=True, - ): - super().__init__() - self.conv = ModulatedConv2d( - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=upsample, - blur_kernel=blur_kernel, - demodulate=demodulate, - ) - self.noise = NoiseInjection() - self.activate = FusedLeakyReLU(out_channel) - - def forward(self, input, style, noise=None): - out = self.conv(input, style) - out = self.noise(out, noise=noise) - out = self.activate(out) - return out - - -class ToRGB(nn.Module): - def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): - super().__init__() - if upsample: - self.upsample = Upsample(blur_kernel) - - self.conv = ModulatedConv2d( - in_channel, 3, 1, style_dim, demodulate=False) - self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) - - def forward(self, input, style, skip=None): - out = self.conv(input, style) - out = out + self.bias - - if skip is not None: - skip = self.upsample(skip) - out = out + skip - - return out - - -class Generator(nn.Module): - def __init__( - self, - size, - style_dim, - n_mlp, - channel_multiplier=1, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - small=False, - small_isaac=False, - ): - super().__init__() - - self.size = size - - if small and size > 64: - raise ValueError("small only works for sizes <= 64") - - self.style_dim = style_dim - layers = [PixelNorm()] - - for i in range(n_mlp): - layers.append( - EqualLinear( - style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu" - ) - ) - - self.style = nn.Sequential(*layers) - - if small: - self.channels = { - 4: 64 * channel_multiplier, - 8: 64 * channel_multiplier, - 16: 64 * channel_multiplier, - 32: 64 * channel_multiplier, - 64: 64 * channel_multiplier, - } - elif small_isaac: - self.channels = {4: 256, 8: 256, - 16: 256, 32: 256, 64: 128, 128: 128} - else: - self.channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - self.input = ConstantInput(self.channels[4]) - self.conv1 = StyledConv( - self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel - ) - self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) - - self.log_size = int(math.log(size, 2)) - self.num_layers = (self.log_size - 2) * 2 + 1 - - self.convs = nn.ModuleList() - self.upsamples = nn.ModuleList() - self.to_rgbs = nn.ModuleList() - self.noises = nn.Module() - - in_channel = self.channels[4] - - for layer_idx in range(self.num_layers): - res = (layer_idx + 5) // 2 - shape = [1, 1, 2 ** res, 2 ** res // 2] - self.noises.register_buffer( - "noise_{}".format(layer_idx), torch.randn(*shape) - ) - - for i in range(3, self.log_size + 1): - out_channel = self.channels[2 ** i] - - self.convs.append( - StyledConv( - in_channel, - out_channel, - 3, - style_dim, - upsample=True, - blur_kernel=blur_kernel, - ) - ) - - self.convs.append( - StyledConv( - out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel - ) - ) - - self.to_rgbs.append(ToRGB(out_channel, style_dim)) - in_channel = out_channel - - self.n_latent = self.log_size * 2 - 2 - - def make_noise(self): - device = self.input.input.device - - noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2 // 2, 
device=device)] - - for i in range(3, self.log_size + 1): - for _ in range(2): - noises.append(torch.randn( - 1, 1, 2 ** i, 2 ** i // 2, device=device)) - - return noises - - def mean_latent(self, n_latent): - latent_in = torch.randn( - n_latent, self.style_dim, device=self.input.input.device - ) - latent = self.style(latent_in).mean(0, keepdim=True) - - return latent - - def get_latent(self, input): - return self.style(input) - - def forward( - self, - styles, - return_latents=False, - return_features=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - randomize_noise=True, - real=False, - ): - if not input_is_latent: - styles = [self.style(s) for s in styles] - if noise is None: - if randomize_noise: - noise = [None] * self.num_layers - else: - noise = [ - getattr(self.noises, "noise_{}".format(i)) - for i in range(self.num_layers) - ] - - if truncation < 1: - # print('truncation_latent: ', truncation_latent.shape) - if not real: # if type(styles) == list: - style_t = [] - for style in styles: - style_t.append( - truncation_latent + truncation * - (style - truncation_latent) - ) # (-1.1162e-03-(-1.0914e-01))*0.8+(-1.0914e-01) - styles = style_t - else: # styles are latent (tensor: 1,18,512), for real PTI output - truncation_latent = truncation_latent.repeat( - 18, 1).unsqueeze(0) # (1,512) --> (1,18,512) - styles = torch.add(truncation_latent, torch.mul( - torch.sub(styles, truncation_latent), truncation)) - # print('now styles after truncation : ', styles) - # if type(styles) == list and len(styles) < 2: # this if for input as list of [(1,512)] - if not real: - if len(styles) < 2: - inject_index = self.n_latent - if styles[0].ndim < 3: - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: - latent = styles[0] - elif type(styles) == list: - if inject_index is None: - inject_index = 4 - - latent = styles[0].unsqueeze(0) - if latent.shape[1] == 1: - latent = latent.repeat(1, inject_index, 1) - else: - latent = latent[:, :inject_index, :] - latent2 = styles[1].unsqueeze(1).repeat( - 1, self.n_latent - inject_index, 1) - latent = torch.cat([latent, latent2], 1) - # input is tensor of size with torch.Size([1, 18, 512]), for real PTI output - else: - latent = styles - - # print(f'processed latent: {latent.shape}') - - features = {} - out = self.input(latent) - features["out_0"] = out - out = self.conv1(out, latent[:, 0], noise=noise[0]) - features["conv1_0"] = out - - skip = self.to_rgb1(out, latent[:, 1]) - features["skip_0"] = skip - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs - ): - out = conv1(out, latent[:, i], noise=noise1) - features["conv1_{}".format(i)] = out - out = conv2(out, latent[:, i + 1], noise=noise2) - features["conv2_{}".format(i)] = out - skip = to_rgb(out, latent[:, i + 2], skip) - features["skip_{}".format(i)] = skip - - i += 2 - - image = skip - - if return_latents: - return image, latent - elif return_features: - return image, features - else: - return image, None - - -class ConvLayer(nn.Sequential): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - downsample=False, - blur_kernel=[1, 3, 3, 1], - bias=True, - activate=True, - ): - layers = [] - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - layers.append(Blur(blur_kernel, pad=(pad0, pad1))) - - stride = 2 - self.padding = 0 - - else: - stride = 1 - self.padding = 
kernel_size // 2 - - layers.append( - EqualConv2d( - in_channel, - out_channel, - kernel_size, - padding=self.padding, - stride=stride, - bias=bias and not activate, - ) - ) - - if activate: - if bias: - layers.append(FusedLeakyReLU(out_channel)) - else: - layers.append(ScaledLeakyReLU(0.2)) - - super().__init__(*layers) - - -class ResBlock(nn.Module): - def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - self.conv1 = ConvLayer(in_channel, in_channel, 3) - self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True) - - self.skip = ConvLayer( - in_channel, out_channel, 1, downsample=True, activate=False, bias=False - ) - - def forward(self, input): - out = self.conv1(input) - out = self.conv2(out) - - skip = self.skip(input) - out = (out + skip) / math.sqrt(2) - - return out - - -class StyleDiscriminator(nn.Module): - def __init__( - self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], small=False - ): - super().__init__() - - if small: - channels = {4: 64, 8: 64, 16: 64, 32: 64, 64: 64} - - else: - channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - convs = [ConvLayer(3, channels[size], 1)] - - log_size = int(math.log(size, 2)) - in_channel = channels[size] - - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - - convs.append(ResBlock(in_channel, out_channel, blur_kernel)) - - in_channel = out_channel - - self.convs = nn.Sequential(*convs) - - self.stddev_group = 4 - self.stddev_feat = 1 - - self.final_conv = ConvLayer(in_channel + 1, channels[4], 3) - self.final_linear = nn.Sequential( - EqualLinear(channels[4] * 4 * 4, channels[4], - activation="fused_lrelu"), - EqualLinear(channels[4], 1), - ) - - def forward(self, input): - h = input - h_list = [] - - for index, blocklist in enumerate(self.convs): - h = blocklist(h) - h_list.append(h) - - out = h - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - h_list.append(out) - - out = out.view(batch, -1) - out = self.final_linear(out) - - return out, h_list - - -class StyleEncoder(nn.Module): - def __init__(self, size, w_dim=512): - super().__init__() - - channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256, - 128: 128, - 256: 64, - 512: 32, - 1024: 16 - } - - self.w_dim = w_dim - log_size = int(math.log(size, 2)) - convs = [ConvLayer(3, channels[size], 1)] - - in_channel = channels[size] - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - convs.append(ResBlock(in_channel, out_channel)) - in_channel = out_channel - - convs.append(EqualConv2d( - in_channel, 2*self.w_dim, 4, padding=0, bias=False)) - - self.convs = nn.Sequential(*convs) - - def forward(self, input): - out = self.convs(input) - # return out.view(len(input), self.n_latents, self.w_dim) - reshaped = out.view(len(input), 2*self.w_dim) - return reshaped[:, :self.w_dim], reshaped[:, self.w_dim:] - - -def kaiming_init(m): - if isinstance(m, (nn.Linear, nn.Conv2d)): - init.kaiming_normal_(m.weight) - if m.bias is not None: - 
m.bias.data.fill_(0) - elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)): - m.weight.data.fill_(1) - if m.bias is not None: - m.bias.data.fill_(0) - - -def normal_init(m): - if isinstance(m, (nn.Linear, nn.Conv2d)): - init.normal_(m.weight, 0, 0.02) - if m.bias is not None: - m.bias.data.fill_(0) - elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)): - m.weight.data.fill_(1) - if m.bias is not None: - m.bias.data.fill_(0) diff --git a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/training/dataset.py b/spaces/gyugnsu/DragGan-Inversion/stylegan_human/training/dataset.py deleted file mode 100644 index f04842155f754b0aac49b91b1de1de6db017a776..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/training/dataset.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Streaming images and labels from datasets created with dataset_tool.py.""" - -import os -import numpy as np -import zipfile -import PIL.Image -import json -import torch -import dnnlib - -try: - import pyspng -except ImportError: - pyspng = None - -# ---------------------------------------------------------------------------- - - -class Dataset(torch.utils.data.Dataset): - def __init__(self, - name, # Name of the dataset. - raw_shape, # Shape of the raw image data (NCHW). - # Artificially limit the size of the dataset. None = no limit. Applied before xflip. - max_size=None, - # Enable conditioning labels? False = label dimension is zero. - use_labels=False, - # Artificially double the size of the dataset via x-flips. Applied after max_size. - xflip=False, - # Random seed to use when applying max_size. - random_seed=0, - ): - self._name = name - self._raw_shape = list(raw_shape) - self._use_labels = use_labels - self._raw_labels = None - self._label_shape = None - - # Apply max_size. - self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64) - if (max_size is not None) and (self._raw_idx.size > max_size): - np.random.RandomState(random_seed).shuffle(self._raw_idx) - self._raw_idx = np.sort(self._raw_idx[:max_size]) - - # Apply xflip. 
- self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8) - if xflip: - self._raw_idx = np.tile(self._raw_idx, 2) - self._xflip = np.concatenate( - [self._xflip, np.ones_like(self._xflip)]) - - def _get_raw_labels(self): - if self._raw_labels is None: - self._raw_labels = self._load_raw_labels() if self._use_labels else None - if self._raw_labels is None: - self._raw_labels = np.zeros( - [self._raw_shape[0], 0], dtype=np.float32) - assert isinstance(self._raw_labels, np.ndarray) - assert self._raw_labels.shape[0] == self._raw_shape[0] - assert self._raw_labels.dtype in [np.float32, np.int64] - if self._raw_labels.dtype == np.int64: - assert self._raw_labels.ndim == 1 - assert np.all(self._raw_labels >= 0) - return self._raw_labels - - def close(self): # to be overridden by subclass - pass - - def _load_raw_image(self, raw_idx): # to be overridden by subclass - raise NotImplementedError - - def _load_raw_labels(self): # to be overridden by subclass - raise NotImplementedError - - def __getstate__(self): - return dict(self.__dict__, _raw_labels=None) - - def __del__(self): - try: - self.close() - except: - pass - - def __len__(self): - return self._raw_idx.size - - def __getitem__(self, idx): - image = self._load_raw_image(self._raw_idx[idx]) - assert isinstance(image, np.ndarray) - assert list(image.shape) == self.image_shape - assert image.dtype == np.uint8 - if self._xflip[idx]: - assert image.ndim == 3 # CHW - image = image[:, :, ::-1] - return image.copy(), self.get_label(idx) - - def get_label(self, idx): - label = self._get_raw_labels()[self._raw_idx[idx]] - if label.dtype == np.int64: - onehot = np.zeros(self.label_shape, dtype=np.float32) - onehot[label] = 1 - label = onehot - return label.copy() - - def get_details(self, idx): - d = dnnlib.EasyDict() - d.raw_idx = int(self._raw_idx[idx]) - d.xflip = (int(self._xflip[idx]) != 0) - d.raw_label = self._get_raw_labels()[d.raw_idx].copy() - return d - - @property - def name(self): - return self._name - - @property - def image_shape(self): - return list(self._raw_shape[1:]) - - @property - def num_channels(self): - assert len(self.image_shape) == 3 # CHW - return self.image_shape[0] - - @property - def resolution(self): - assert len(self.image_shape) == 3 # CHW - assert self.image_shape[1] == self.image_shape[2] - return self.image_shape[1] - - @property - def label_shape(self): - if self._label_shape is None: - raw_labels = self._get_raw_labels() - if raw_labels.dtype == np.int64: - self._label_shape = [int(np.max(raw_labels)) + 1] - else: - self._label_shape = raw_labels.shape[1:] - return list(self._label_shape) - - @property - def label_dim(self): - assert len(self.label_shape) == 1 - return self.label_shape[0] - - @property - def has_labels(self): - return any(x != 0 for x in self.label_shape) - - @property - def has_onehot_labels(self): - return self._get_raw_labels().dtype == np.int64 - -# ---------------------------------------------------------------------------- - - -class ImageFolderDataset(Dataset): - def __init__(self, - path, # Path to directory or zip. - # Ensure specific resolution, None = highest available. - resolution=None, - # Additional arguments for the Dataset base class. 
- **super_kwargs, - ): - self._path = path - self._zipfile = None - - if os.path.isdir(self._path): - self._type = 'dir' - self._all_fnames = {os.path.relpath(os.path.join( - root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files} - elif self._file_ext(self._path) == '.zip': - self._type = 'zip' - self._all_fnames = set(self._get_zipfile().namelist()) - else: - raise IOError('Path must point to a directory or zip') - - PIL.Image.init() - self._image_fnames = sorted( - fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION) - if len(self._image_fnames) == 0: - raise IOError('No image files found in the specified path') - - name = os.path.splitext(os.path.basename(self._path))[0] - raw_shape = [len(self._image_fnames)] + \ - list(self._load_raw_image(0).shape) - if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution): - raise IOError('Image files do not match the specified resolution') - super().__init__(name=name, raw_shape=raw_shape, **super_kwargs) - - @staticmethod - def _file_ext(fname): - return os.path.splitext(fname)[1].lower() - - def _get_zipfile(self): - assert self._type == 'zip' - if self._zipfile is None: - self._zipfile = zipfile.ZipFile(self._path) - return self._zipfile - - def _open_file(self, fname): - if self._type == 'dir': - return open(os.path.join(self._path, fname), 'rb') - if self._type == 'zip': - return self._get_zipfile().open(fname, 'r') - return None - - def close(self): - try: - if self._zipfile is not None: - self._zipfile.close() - finally: - self._zipfile = None - - def __getstate__(self): - return dict(super().__getstate__(), _zipfile=None) - - def _load_raw_image(self, raw_idx): - fname = self._image_fnames[raw_idx] - with self._open_file(fname) as f: - if pyspng is not None and self._file_ext(fname) == '.png': - image = pyspng.load(f.read()) - else: - image = np.array(PIL.Image.open(f)) - if image.ndim == 2: - image = image[:, :, np.newaxis] # HW => HWC - image = image.transpose(2, 0, 1) # HWC => CHW - return image - - def _load_raw_labels(self): - fname = 'dataset.json' - if fname not in self._all_fnames: - return None - with self._open_file(fname) as f: - labels = json.load(f)['labels'] - if labels is None: - return None - labels = dict(labels) - labels = [labels[fname.replace('\\', '/')] - for fname in self._image_fnames] - labels = np.array(labels) - labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim]) - return labels - -# ---------------------------------------------------------------------------- diff --git a/spaces/h2oai/h2ogpt-chatbot/src/serpapi.py b/spaces/h2oai/h2ogpt-chatbot/src/serpapi.py deleted file mode 100644 index f7ed7f066df2503e5031d59df7f80766b6984ef9..0000000000000000000000000000000000000000 --- a/spaces/h2oai/h2ogpt-chatbot/src/serpapi.py +++ /dev/null @@ -1,167 +0,0 @@ -import functools -import typing - -import aiohttp -from langchain.docstore.document import Document -from langchain import SerpAPIWrapper - -from src.utils_langchain import _chunk_sources, add_parser, _add_meta -from urllib.parse import urlparse - - -class H2OSerpAPIWrapper(SerpAPIWrapper): - def get_search_documents(self, query, - query_action=True, - chunk=True, chunk_size=512, - db_type='chroma', - headsize=50, - top_k_docs=-1): - docs = self.run(query, headsize) - - chunk_sources = functools.partial(_chunk_sources, chunk=chunk, chunk_size=chunk_size, db_type=db_type) - docs = chunk_sources(docs) - - # choose chunk type - if query_action: - docs = 
[x for x in docs if x.metadata['chunk_id'] >= 0] - else: - docs = [x for x in docs if x.metadata['chunk_id'] == -1] - - # get score assuming search results scale with ranking - delta = 0.05 - [x.metadata.update(score=0.1 + delta * x.metadata['chunk_id'] if x.metadata['chunk_id'] >= 0 else -1) for x in - docs] - - # ensure see all results up to cutoff or mixing with non-web docs - if top_k_docs >= 1: - top_k_docs = max(top_k_docs, len(docs)) - - return docs, top_k_docs - - async def arun(self, query: str, headsize: int, **kwargs: typing.Any) -> list: - """Run query through SerpAPI and parse result async.""" - return self._process_response(await self.aresults(query), query, headsize) - - def run(self, query: str, headsize: int, **kwargs: typing.Any) -> list: - """Run query through SerpAPI and parse result.""" - return self._process_response(self.results(query), query, headsize) - - @staticmethod - def _process_response(res: dict, query: str, headsize: int) -> list: - try: - return H2OSerpAPIWrapper.__process_response(res, query, headsize) - except Exception as e: - print("SERP search failed: %s" % str(e)) - return [] - - @staticmethod - def __process_response(res: dict, query: str, headsize: int) -> list: - docs = [] - - res1 = SerpAPIWrapper._process_response(res) - if res1: - if isinstance(res1, str) and not res1.startswith('['): # avoid snippets - docs += [Document(page_content='Web search result %s: ' % len(docs) + res1, - metadata=dict(source='Web Search %s for %s' % (len(docs), query), score=0.0))] - elif isinstance(res1, list): - for x in res1: - date = '' - content = '' - if 'source' in x: - source = x['source'] - content += '%s says' % source - else: - content = 'Web search result %s: ' % len(docs) - if 'date' in x: - date = x['date'] - content += ' %s' % date - if 'title' in x: - content += ': %s' % x['title'] - if 'snippet' in x: - content += ': %s' % x['snippet'] - if 'link' in x: - link = x['link'] - domain = urlparse(link).netloc - font_size = 2 - source_name = domain - http_content = """%s""" % ( - font_size, link, source_name) - source = 'Web Search %s' % len(docs) + \ - ' from Date: %s Domain: %s Link: %s' % (date, domain, http_content) - if date: - content += ' around %s' % date - content += ' according to %s' % domain - else: - source = 'Web Search %s for %s' % (len(docs), query) - docs += [Document(page_content=content, metadata=dict(source=source, score=0.0))] - - if "knowledge_graph" in res.keys(): - knowledge_graph = res["knowledge_graph"] - title = knowledge_graph["title"] if "title" in knowledge_graph else "" - if "description" in knowledge_graph.keys(): - docs += [Document(page_content='Web search result %s: ' % len(docs) + knowledge_graph["description"], - metadata=dict(source='Web Search %s with knowledge_graph description for %s' % ( - len(docs), query), score=0.0))] - for key, value in knowledge_graph.items(): - if ( - type(key) == str - and type(value) == str - and key not in ["title", "description"] - and not key.endswith("_stick") - and not key.endswith("_link") - and not value.startswith("http") - ): - docs += [Document(page_content='Web search result %s: ' % len(docs) + f"{title} {key}: {value}.", - metadata=dict( - source='Web Search %s with knowledge_graph for %s' % (len(docs), query), - score=0.0))] - if "organic_results" in res.keys(): - for org_res in res["organic_results"]: - keys_to_try = ['snippet', 'snippet_highlighted_words', 'rich_snippet', 'rich_snippet_table', 'link'] - for key in keys_to_try: - if key in org_res.keys(): - date = '' - domain = 
'' - link = '' - snippet1 = '' - if key != 'link': - snippet1 = org_res[key] - if 'date' in org_res.keys(): - date = org_res['date'] - snippet1 += ' on %s' % date - else: - date = 'unknown date' - if 'link' in org_res.keys(): - link = org_res['link'] - domain = urlparse(link).netloc - if key == 'link': - # worst case, only url might have REST info - snippet1 += ' Link at %s: %s' % (domain, link, domain) - else: - snippet1 += ' according to %s' % domain - if snippet1: - font_size = 2 - source_name = domain - http_content = """%s""" % ( - font_size, link, source_name) - source = 'Web Search %s' % len(docs) + \ - ' from Date: %s Domain: %s Link: %s' % (date, domain, http_content) - domain_simple = domain.replace('www.', '').replace('.com', '') - snippet1 = '%s says on %s: %s' % (domain_simple, date, snippet1) - docs += [Document(page_content=snippet1, metadata=dict(source=source), score=0.0)] - break - if "buying_guide" in res.keys(): - docs += [Document(page_content='Web search result %s: ' % len(docs) + res["buying_guide"], - metadata=dict(source='Web Search %s with buying_guide for %s' % (len(docs), query)), - score=0.0)] - if "local_results" in res.keys() and "places" in res["local_results"].keys(): - docs += [Document(page_content='Web search result %s: ' % len(docs) + res["local_results"]["places"], - metadata=dict( - source='Web Search %s with local_results_places for %s' % (len(docs), query)), - score=0.0)] - - # add meta - add_meta = functools.partial(_add_meta, headsize=headsize, parser='SERPAPI') - add_meta(docs, query) - - return docs diff --git a/spaces/h2oai/wave-tour/examples/plot_bokeh_callbacks.py b/spaces/h2oai/wave-tour/examples/plot_bokeh_callbacks.py deleted file mode 100644 index 492797c70e92ffcaab241996edc11df0ee11fdea..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/plot_bokeh_callbacks.py +++ /dev/null @@ -1,109 +0,0 @@ -# Plot / Bokeh / Widgets -# Embed Bokeh widgets with script callbacks -# --- - -# Original source: https://docs.bokeh.org/en/latest/docs/user_guide/interaction/callbacks.html#customjs-for-selections - -import json -from random import random -from h2o_wave import main, app, Q, ui, data -from bokeh.resources import CDN -from bokeh.layouts import row -from bokeh.models import ColumnDataSource, CustomJS -from bokeh.plotting import figure, output_file, show -from bokeh.embed import json_item - - -@app('/demo') -async def serve(q: Q): - if not q.client.initialized: - q.client.initialized = True - - # Create a plot - x = [random() for x in range(500)] - y = [random() for y in range(500)] - - s1 = ColumnDataSource(data=dict(x=x, y=y)) - p1 = figure(plot_width=250, plot_height=300, tools="lasso_select", title="Select Here") - p1.circle('x', 'y', source=s1, alpha=0.6) - - s2 = ColumnDataSource(data=dict(x=[], y=[])) - p2 = figure(plot_width=250, plot_height=300, x_range=(0, 1), y_range=(0, 1), tools="", title="Watch Here") - p2.circle('x', 'y', source=s2, alpha=0.6) - - s1.selected.js_on_change( - 'indices', - CustomJS( - args=dict(s1=s1, s2=s2), - code=""" - var indices = cb_obj.indices; - var d1 = s1.data; - var d2 = s2.data; - d2['x'] = [] - d2['y'] = [] - for (var i = 0; i < indices.length; i++) { - d2['x'].push(d1['x'][indices[i]]) - d2['y'].push(d1['y'][indices[i]]) - } - s2.change.emit(); - - // Send the selected indices to the Wave app via an event. - // Here, - // - The first argument, 'the_plot', is some name to uniquely identify the source of the event. 
- // - The second argument, 'selected', is some name to uniquely identify the type of event. - // - The third argument is any arbitrary data to be sent as part of the event. - // Ordinarily, we would just call wave.emit('the_plot', 'selected', indices), but this particular - // example triggers events every time the indices change (which is several times per second), - // so we use a 'debounced' version of 'emit()' that waits for a second before emitting an event. - // Here, 'emit_debounced()' is not part of the Wave API, but custom-built for this example - see - // the inline_script's 'content' below. - emit_debounced('the_plot', 'selected', indices); - // The indices will be accessible to the Wave app using 'q.events.the_plot.selected'. - """ - ) - ) - - layout = row(p1, p2) - - # Serialize the plot as JSON. - # See https://docs.bokeh.org/en/latest/docs/user_guide/embed.html#json-items - plot_id = 'my_plot' - plot_data = json.dumps(json_item(layout, plot_id)) - - q.page['meta'] = ui.meta_card( - box='', - # Import Bokeh Javascript libraries from CDN - scripts=[ui.script(path=f) for f in CDN.js_files], - # Execute custom Javascript - script=ui.inline_script( - # The inline script does two things: - content=f''' - // 1. Create a debounced version of `wave.emit()` and make it accessible to Bokeh's event handler. - // window.emit_debounced() is the name of new, debounced (calmer) version of wave.emit() that waits - // for 1000ms before emitting an event. - window.emit_debounced=window.wave.debounce(1000, window.wave.emit); - - // 2. Make Bokeh render the plot. - Bokeh.embed.embed_item({plot_data}); - ''', - # Ensure that the Bokeh Javascript library is loaded - requires=['Bokeh'], - # Ensure that the target HTML container element is available - targets=[plot_id], - ), - ) - q.page['plot'] = ui.markup_card( - box='1 1 4 4', - title='', - content=f'
          ', - ) - q.page['details'] = ui.markdown_card( - box='1 5 4 1', - title='Selected Marks', - content='Nothing selected.', - ) - else: - if q.events.the_plot.selected: - q.page['details'].content = f'You selected {q.events.the_plot.selected}' - - await q.page.save() diff --git a/spaces/hahahehe99340/chatgpt/run_Windows.bat b/spaces/hahahehe99340/chatgpt/run_Windows.bat deleted file mode 100644 index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000 --- a/spaces/hahahehe99340/chatgpt/run_Windows.bat +++ /dev/null @@ -1,5 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" diff --git a/spaces/haonanzhang/ChatGPT-BOT/assets/Kelpy-Codos.js b/spaces/haonanzhang/ChatGPT-BOT/assets/Kelpy-Codos.js deleted file mode 100644 index cfbaeedb4f371dfb5fe157db545b364046fca3e1..0000000000000000000000000000000000000000 --- a/spaces/haonanzhang/ChatGPT-BOT/assets/Kelpy-Codos.js +++ /dev/null @@ -1,76 +0,0 @@ -// ==UserScript== -// @name Kelpy Codos -// @namespace https://github.com/Keldos-Li/Kelpy-Codos -// @version 1.0.5 -// @author Keldos; https://keldos.me/ -// @description Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially. -// Based on Chuanhu ChatGPT version: ac04408 (2023-3-22) -// @license GPL-3.0 -// @grant none -// ==/UserScript== - -(function () { - 'use strict'; - - function addCopyButton(pre) { - var code = pre.querySelector('code'); - if (!code) { - return; // 如果没有找到 元素,则不添加按钮 - } - var firstChild = code.firstChild; - if (!firstChild) { - return; // 如果 元素没有子节点,则不添加按钮 - } - var button = document.createElement('button'); - button.textContent = '\uD83D\uDCCE'; // 使用 📎 符号作为“复制”按钮的文本 - button.style.position = 'relative'; - button.style.float = 'right'; - button.style.fontSize = '1em'; // 可选:调整按钮大小 - button.style.background = 'none'; // 可选:去掉背景颜色 - button.style.border = 'none'; // 可选:去掉边框 - button.style.cursor = 'pointer'; // 可选:显示指针样式 - button.addEventListener('click', function () { - var range = document.createRange(); - range.selectNodeContents(code); - range.setStartBefore(firstChild); // 将范围设置为第一个子节点之前 - var selection = window.getSelection(); - selection.removeAllRanges(); - selection.addRange(range); - - try { - var success = document.execCommand('copy'); - if (success) { - button.textContent = '\u2714'; - setTimeout(function () { - button.textContent = '\uD83D\uDCCE'; // 恢复按钮为“复制” - }, 2000); - } else { - button.textContent = '\u2716'; - } - } catch (e) { - console.error(e); - button.textContent = '\u2716'; - } - - selection.removeAllRanges(); - }); - code.insertBefore(button, firstChild); // 将按钮插入到第一个子元素之前 - } - - function handleNewElements(mutationsList, observer) { - for (var mutation of mutationsList) { - if (mutation.type === 'childList') { - for (var node of mutation.addedNodes) { - if (node.nodeName === 'PRE') { - addCopyButton(node); - } - } - } - } - } - - var observer = new MutationObserver(handleNewElements); - observer.observe(document.documentElement, { childList: true, subtree: true }); - - document.querySelectorAll('pre').forEach(addCopyButton); -})(); diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/evaluation/lvis/lvis.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/evaluation/lvis/lvis.py deleted file mode 100644 index 9cad8004bfbf962d03927f1826f1525b2c93789b..0000000000000000000000000000000000000000 --- 
a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/evaluation/lvis/lvis.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import json -import os -import time -from collections import defaultdict - -import pycocotools.mask as mask_utils -import torchvision -from PIL import Image - - - -def _isArrayLike(obj): - return hasattr(obj, "__iter__") and hasattr(obj, "__len__") - - -class LVIS: - def __init__(self, annotation_path=None): - """Class for reading and visualizing annotations. - Args: - annotation_path (str): location of annotation file - """ - self.anns = {} - self.cats = {} - self.imgs = {} - self.img_ann_map = defaultdict(list) - self.cat_img_map = defaultdict(list) - self.dataset = {} - - if annotation_path is not None: - print("Loading annotations.") - - tic = time.time() - self.dataset = self._load_json(annotation_path) - print("Done (t={:0.2f}s)".format(time.time() - tic)) - - assert type(self.dataset) == dict, "Annotation file format {} not supported.".format(type(self.dataset)) - self._create_index() - - def _load_json(self, path): - with open(path, "r") as f: - return json.load(f) - - def _create_index(self): - print("Creating index.") - - self.img_ann_map = defaultdict(list) - self.cat_img_map = defaultdict(list) - - self.anns = {} - self.cats = {} - self.imgs = {} - - for ann in self.dataset["annotations"]: - self.img_ann_map[ann["image_id"]].append(ann) - self.anns[ann["id"]] = ann - - for img in self.dataset["images"]: - self.imgs[img["id"]] = img - - for cat in self.dataset["categories"]: - self.cats[cat["id"]] = cat - - for ann in self.dataset["annotations"]: - self.cat_img_map[ann["category_id"]].append(ann["image_id"]) - - print("Index created.") - - def get_ann_ids(self, img_ids=None, cat_ids=None, area_rng=None): - """Get ann ids that satisfy given filter conditions. - Args: - img_ids (int array): get anns for given imgs - cat_ids (int array): get anns for given cats - area_rng (float array): get anns for a given area range. e.g [0, inf] - Returns: - ids (int array): integer array of ann ids - """ - if img_ids is not None: - img_ids = img_ids if _isArrayLike(img_ids) else [img_ids] - if cat_ids is not None: - cat_ids = cat_ids if _isArrayLike(cat_ids) else [cat_ids] - anns = [] - if img_ids is not None: - for img_id in img_ids: - anns.extend(self.img_ann_map[img_id]) - else: - anns = self.dataset["annotations"] - - # return early if no more filtering required - if cat_ids is None and area_rng is None: - return [_ann["id"] for _ann in anns] - - cat_ids = set(cat_ids) - - if area_rng is None: - area_rng = [0, float("inf")] - - ann_ids = [ - _ann["id"] - for _ann in anns - if _ann["category_id"] in cat_ids and _ann["area"] > area_rng[0] and _ann["area"] < area_rng[1] - ] - return ann_ids - - def get_cat_ids(self): - """Get all category ids. - Returns: - ids (int array): integer array of category ids - """ - return list(self.cats.keys()) - - def get_img_ids(self): - """Get all img ids. - Returns: - ids (int array): integer array of image ids - """ - return list(self.imgs.keys()) - - def _load_helper(self, _dict, ids): - if ids is None: - return list(_dict.values()) - elif _isArrayLike(ids): - return [_dict[id] for id in ids] - else: - return [_dict[ids]] - - def load_anns(self, ids=None): - """Load anns with the specified ids. If ids=None load all anns. 
- Args: - ids (int array): integer array of annotation ids - Returns: - anns (dict array) : loaded annotation objects - """ - return self._load_helper(self.anns, ids) - - def load_cats(self, ids): - """Load categories with the specified ids. If ids=None load all - categories. - Args: - ids (int array): integer array of category ids - Returns: - cats (dict array) : loaded category dicts - """ - return self._load_helper(self.cats, ids) - - def load_imgs(self, ids): - """Load categories with the specified ids. If ids=None load all images. - Args: - ids (int array): integer array of image ids - Returns: - imgs (dict array) : loaded image dicts - """ - return self._load_helper(self.imgs, ids) - - def download(self, save_dir, img_ids=None): - """Download images from mscoco.org server. - Args: - save_dir (str): dir to save downloaded images - img_ids (int array): img ids of images to download - """ - imgs = self.load_imgs(img_ids) - - if not os.path.exists(save_dir): - os.makedirs(save_dir) - - for img in imgs: - file_name = os.path.join(save_dir, img["file_name"]) - if not os.path.exists(file_name): - from urllib.request import urlretrieve - - urlretrieve(img["coco_url"], file_name) - - def ann_to_rle(self, ann): - """Convert annotation which can be polygons, uncompressed RLE to RLE. - Args: - ann (dict) : annotation object - Returns: - ann (rle) - """ - img_data = self.imgs[ann["image_id"]] - h, w = img_data["height"], img_data["width"] - segm = ann["segmentation"] - if isinstance(segm, list): - # polygon -- a single object might consist of multiple parts - # we merge all parts into one mask rle code - rles = mask_utils.frPyObjects(segm, h, w) - rle = mask_utils.merge(rles) - elif isinstance(segm["counts"], list): - # uncompressed RLE - rle = mask_utils.frPyObjects(segm, h, w) - else: - # rle - rle = ann["segmentation"] - return rle - - def ann_to_mask(self, ann): - """Convert annotation which can be polygons, uncompressed RLE, or RLE - to binary mask. - Args: - ann (dict) : annotation object - Returns: - binary mask (numpy 2D array) - """ - rle = self.ann_to_rle(ann) - return mask_utils.decode(rle) - diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/DensePose/doc/GETTING_STARTED.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/DensePose/doc/GETTING_STARTED.md deleted file mode 100644 index a6bcbedee42835c99fa5aa1110309329dfbff6f0..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/DensePose/doc/GETTING_STARTED.md +++ /dev/null @@ -1,58 +0,0 @@ -# Getting Started with DensePose - -## Inference with Pre-trained Models - -1. Pick a model and its config file from [Model Zoo](MODEL_ZOO.md), for example [densepose_rcnn_R_50_FPN_s1x.yaml](../configs/densepose_rcnn_R_50_FPN_s1x.yaml) -2. Run the [Apply Net](TOOL_APPLY_NET.md) tool to visualize the results or save the to disk. For example, to use contour visualization for DensePose, one can run: -```bash -python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml densepose_rcnn_R_50_FPN_s1x.pkl image.jpg dp_contour,bbox --output image_densepose_contour.png -``` -Please see [Apply Net](TOOL_APPLY_NET.md) for more details on the tool. - -## Training - -First, prepare the [dataset](http://densepose.org/#dataset) into the following structure under the directory you'll run training scripts: -
          -datasets/coco/
          -  annotations/
          -    densepose_{train,minival,valminusminival}2014.json
          -    densepose_minival2014_100.json   (optional, for testing only)
          -  {train,val}2014/
          -    # image files that are mentioned in the corresponding json
          -
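For example, one way to create this layout (the paths below are placeholders for wherever the COCO images and the DensePose annotations were downloaded) is:
```bash
mkdir -p datasets/coco/annotations
ln -s /path/to/coco/train2014 datasets/coco/train2014
ln -s /path/to/coco/val2014 datasets/coco/val2014
ln -s /path/to/densepose_annotations/*.json datasets/coco/annotations/
```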
          - -To train a model one can use the [train_net.py](../train_net.py) script. -This script was used to train all DensePose models in [Model Zoo](MODEL_ZOO.md). -For example, to launch end-to-end DensePose-RCNN training with ResNet-50 FPN backbone -on 8 GPUs following the s1x schedule, one can run -```bash -python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml --num-gpus 8 -``` -The configs are made for 8-GPU training. To train on 1 GPU, one can apply the -[linear learning rate scaling rule](https://arxiv.org/abs/1706.02677): -```bash -python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml \ - SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025 -``` - -## Evaluation - -Model testing can be done in the same way as training, except for an additional flag `--eval-only` and -model location specification through `MODEL.WEIGHTS model.pth` in the command line -```bash -python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml \ - --eval-only MODEL.WEIGHTS model.pth -``` - -## Tools - -We provide tools which allow one to: - - easily view DensePose annotated data in a dataset; - - perform DensePose inference on a set of images; - - visualize DensePose model results; - -`query_db` is a tool to print or visualize DensePose data in a dataset. -Please refer to [Query DB](TOOL_QUERY_DB.md) for more details on this tool - -`apply_net` is a tool to print or visualize DensePose results. -Please refer to [Apply Net](TOOL_APPLY_NET.md) for more details on this tool diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/modules/bn.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/modules/bn.py deleted file mode 100644 index a794698867e89140a030d550d832e6fa12561c8b..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/modules/bn.py +++ /dev/null @@ -1,132 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as functional - -try: - from queue import Queue -except ImportError: - from Queue import Queue - -from .functions import * - - -class ABN(nn.Module): - """Activated Batch Normalization - - This gathers a `BatchNorm2d` and an activation function in a single module - """ - - def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01): - """Creates an Activated Batch Normalization module - - Parameters - ---------- - num_features : int - Number of feature channels in the input and output. - eps : float - Small constant to prevent numerical issues. - momentum : float - Momentum factor applied to compute running statistics as. - affine : bool - If `True` apply learned scale and shift transformation after normalization. - activation : str - Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. - slope : float - Negative slope for the `leaky_relu` activation. 
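        Example
        -------
        A minimal usage sketch (tensor shapes are illustrative)::

            abn = ABN(64, activation="leaky_relu", slope=0.01)
            y = abn(torch.randn(8, 64, 32, 32))  # BatchNorm2d followed by LeakyReLU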
- """ - super(ABN, self).__init__() - self.num_features = num_features - self.affine = affine - self.eps = eps - self.momentum = momentum - self.activation = activation - self.slope = slope - if self.affine: - self.weight = nn.Parameter(torch.ones(num_features)) - self.bias = nn.Parameter(torch.zeros(num_features)) - else: - self.register_parameter('weight', None) - self.register_parameter('bias', None) - self.register_buffer('running_mean', torch.zeros(num_features)) - self.register_buffer('running_var', torch.ones(num_features)) - self.reset_parameters() - - def reset_parameters(self): - nn.init.constant_(self.running_mean, 0) - nn.init.constant_(self.running_var, 1) - if self.affine: - nn.init.constant_(self.weight, 1) - nn.init.constant_(self.bias, 0) - - def forward(self, x): - x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, - self.training, self.momentum, self.eps) - - if self.activation == ACT_RELU: - return functional.relu(x, inplace=True) - elif self.activation == ACT_LEAKY_RELU: - return functional.leaky_relu(x, negative_slope=self.slope, inplace=True) - elif self.activation == ACT_ELU: - return functional.elu(x, inplace=True) - else: - return x - - def __repr__(self): - rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \ - ' affine={affine}, activation={activation}' - if self.activation == "leaky_relu": - rep += ', slope={slope})' - else: - rep += ')' - return rep.format(name=self.__class__.__name__, **self.__dict__) - - -class InPlaceABN(ABN): - """InPlace Activated Batch Normalization""" - - def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01): - """Creates an InPlace Activated Batch Normalization module - - Parameters - ---------- - num_features : int - Number of feature channels in the input and output. - eps : float - Small constant to prevent numerical issues. - momentum : float - Momentum factor applied to compute running statistics as. - affine : bool - If `True` apply learned scale and shift transformation after normalization. - activation : str - Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. - slope : float - Negative slope for the `leaky_relu` activation. - """ - super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) - - def forward(self, x): - x, _, _ = inplace_abn(x, self.weight, self.bias, self.running_mean, self.running_var, - self.training, self.momentum, self.eps, self.activation, self.slope) - return x - - -class InPlaceABNSync(ABN): - """InPlace Activated Batch Normalization with cross-GPU synchronization - This assumes that it will be replicated across GPUs using the same mechanism as in `nn.DistributedDataParallel`. 
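    Example
    -------
    A minimal sketch of the intended multi-GPU usage (``local_rank`` is assumed to come
    from the distributed launcher, and the synchronized statistics rely on the compiled
    inplace_abn extension)::

        bn = InPlaceABNSync(256)
        block = nn.Sequential(nn.Conv2d(128, 256, 3, padding=1), bn)
        block = nn.parallel.DistributedDataParallel(block.cuda(), device_ids=[local_rank])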
- """ - - def forward(self, x): - x, _, _ = inplace_abn_sync(x, self.weight, self.bias, self.running_mean, self.running_var, - self.training, self.momentum, self.eps, self.activation, self.slope) - return x - - def __repr__(self): - rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \ - ' affine={affine}, activation={activation}' - if self.activation == "leaky_relu": - rep += ', slope={slope})' - else: - rep += ')' - return rep.format(name=self.__class__.__name__, **self.__dict__) - - diff --git a/spaces/huggingchat/chat-ui/src/lib/types/SharedConversation.ts b/spaces/huggingchat/chat-ui/src/lib/types/SharedConversation.ts deleted file mode 100644 index 8571f2c3f3af281791c1b71680960c861d98d121..0000000000000000000000000000000000000000 --- a/spaces/huggingchat/chat-ui/src/lib/types/SharedConversation.ts +++ /dev/null @@ -1,13 +0,0 @@ -import type { Message } from "./Message"; -import type { Timestamps } from "./Timestamps"; - -export interface SharedConversation extends Timestamps { - _id: string; - - hash: string; - - model: string; - title: string; - messages: Message[]; - preprompt?: string; -} diff --git a/spaces/humblepenguin/mental-health-chatbot/about.md b/spaces/humblepenguin/mental-health-chatbot/about.md deleted file mode 100644 index e3d723845fa00982ee2b91f9ae95ea0b8bbc1136..0000000000000000000000000000000000000000 --- a/spaces/humblepenguin/mental-health-chatbot/about.md +++ /dev/null @@ -1,75 +0,0 @@ - -

          - Logo -

          - -
          -

          -A mental health chatbot - -![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54) -![](https://img.shields.io/badge/flask-%23000.svg?style=for-the-badge&logo=flask&logoColor=white) -![](https://img.shields.io/badge/Tears-B9DBE1?style=for-the-badge&logo=elixir&logoColor=white) - -[![](https://api.codiga.io/project/30574/status/svg)](https://api.codiga.io/project/30574/score/svg) - -

          -
          - - -
          - -

          This is not the final product

          -
          - -# About -> Only a mind free of impediment is capable of grasping the chaotic beauty of the world. This is our greatest asset. -> -> -- Altair Ibn Lahad - -In collaboration with the Vitruvian society, ```STEMx``` has created a mental health chatbot known as ```AOA``` with which students can go and talk about anything with. They can freely discuss their day-to-day general stressors which they usually would not confer with anyone else, and expect to get a decent response from the bot. - -No data of the conversation is stored, user responses are only fed to the bot so it can generate a response; it is all end-to-end encrypted ensuring no one can invade your session. - -In the future the bot will retain the conversations with the user, so it can generate better responses in the next session. - -# Who is it for -The bot is meant to be used for general day to day conversations or the discussion of small day to day issues. Users suffering from severe depression or anxiety are advised to seek proficient help - -# Usage -The chatbot will be made available to the student body by sharing its link on various different offical WhatsApp groups and the bio of the offical instagram pages of both the ```STEMx``` and ```Vitruvian``` club. QR codes will also be placed around campus. - -Other than that in the future the chatbot will be made available in the ```STEMx application``` which is currently under development - -# Behind the scenes -This section covers the technological aspect of the bot. It does not provide a line for line explanation on how the bot works internally but delivers a brief rundown. - -Normally traditonal chatbots were made using conditional statements and could only answer requests structured in a very specific manner - -```python -if input.contains('hi'): - print("Hello") -elif input.contains('name'): - print("My name is AOA... -``` - -But with the rapid development in technology, specifically ```Artifical Intelligence```, doors have been opened to a new domain of creating machines that behave like a normal human being. A way to create a human like machine is to use something known as a "Neural Network". Just like how it sounds, Neural networks are computing systems with connected nodes that work much like neurons in the human brain. Using algorithms, they can recognize hidden patterns and correlations in raw data, cluster and classify it, and – over time – continuously learn and improve - -The inner workings of a neural network are a bit technical but that is needed to be known is that ```AOA``` uses a neural network to provide the profiency in generating human like responses. - -The nerual network has been trained using a large data set of conversations. - -Since the data set is not being updated ```AOA``` cannot learn over time however conversations with the user will be used to continuously improve the bot - -## Responses -90% of the time the bot will provide a well structured response that makes sense, but since its still a computer it still has its limitations so sometimes the user may recieve some weird responses. The user can simply re-start the conversation to reset the bot to its inital state. - -## Tools used to create the project -### Python -Python is a high-level, interpreted, general-purpose programming language. It is a well regarded language when it comes to building any project related to artifical intelligence. 
Its the language that we used to build the project - -### Flask -Flask is a micro web framework written in Python, it allows us to quickly write simple websites. Flask has been used as the web framework to build the website for the chatbot - -# Issues -Any issues while using the bot are meant to reported to the STEMx society heads \ No newline at end of file diff --git a/spaces/hylee/finetuned_diffusion/app.py b/spaces/hylee/finetuned_diffusion/app.py deleted file mode 100644 index 69531bc382b2a356d5342491a44ecaf773511106..0000000000000000000000000000000000000000 --- a/spaces/hylee/finetuned_diffusion/app.py +++ /dev/null @@ -1,349 +0,0 @@ -from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -from PIL import Image -import utils -import datetime -import time -import psutil -import random - - -start_time = time.time() -is_colab = utils.is_google_colab() -state = None -current_steps = 25 - -class Model: - def __init__(self, name, path="", prefix=""): - self.name = name - self.path = path - self.prefix = prefix - self.pipe_t2i = None - self.pipe_i2i = None - -models = [ - Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "), - Model("Dreamlike Diffusion 1.0", "dreamlike-art/dreamlike-diffusion-1.0", "dreamlikeart "), - Model("Archer", "nitrosocke/archer-diffusion", "archer style "), - Model("Anything V4", "andite/anything-v4.0", ""), - Model("Modern Disney", "nitrosocke/mo-di-diffusion", "modern disney style "), - Model("Classic Disney", "nitrosocke/classic-anim-diffusion", "classic disney style "), - Model("Loving Vincent (Van Gogh)", "dallinmackay/Van-Gogh-diffusion", "lvngvncnt "), - Model("Wavyfusion", "wavymulder/wavyfusion", "wa-vy style "), - Model("Analog Diffusion", "wavymulder/Analog-Diffusion", "analog style "), - Model("Redshift renderer (Cinema4D)", "nitrosocke/redshift-diffusion", "redshift style "), - Model("Midjourney v4 style", "prompthero/midjourney-v4-diffusion", "mdjrny-v4 style "), - Model("Waifu", "hakurei/waifu-diffusion"), - Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style "), - Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "), - Model("TrinArt v2", "naclbit/trinart_stable_diffusion_v2"), - Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "), - Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "), - Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy "), - Model("Pokémon", "lambdalabs/sd-pokemon-diffusers"), - Model("Pony Diffusion", "AstraliteHeart/pony-diffusion"), - Model("Robo Diffusion", "nousr/robo-diffusion"), - Model("Epic Diffusion", "johnslegers/epic-diffusion") - ] - -custom_model = None -if is_colab: - models.insert(0, Model("Custom model")) - custom_model = models[0] - -last_mode = "txt2img" -current_model = models[1] if is_colab else models[0] -current_model_path = current_model.path - -if is_colab: - pipe = StableDiffusionPipeline.from_pretrained( - current_model.path, - torch_dtype=torch.float32, - scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"), - safety_checker=lambda images, clip_input: (images, False) - ) - -else: - pipe = StableDiffusionPipeline.from_pretrained( - current_model.path, - torch_dtype=torch.float32, - scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler") - ) - -if 
torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe.enable_xformers_memory_efficient_attention() - -device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶" - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def update_state(new_state): - global state - state = new_state - -def update_state_info(old_state): - if state and state != old_state: - return gr.update(value=state) - -def custom_model_changed(path): - models[0].path = path - global current_model - current_model = models[0] - -def on_model_change(model_name): - - prefix = "Enter prompt. \"" + next((m.prefix for m in models if m.name == model_name), None) + "\" is prefixed automatically" if model_name != models[0].name else "Don't forget to use the custom model prefix in the prompt!" - - return gr.update(visible = model_name == models[0].name), gr.update(placeholder=prefix) - -def on_steps_change(steps): - global current_steps - current_steps = steps - -def pipe_callback(step: int, timestep: int, latents: torch.FloatTensor): - update_state(f"{step}/{current_steps} steps")#\nTime left, sec: {timestep/100:.0f}") - -def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""): - - update_state(" ") - - print(psutil.virtual_memory()) # print memory usage - - global current_model - for model in models: - if model.name == model_name: - current_model = model - model_path = current_model.path - - # generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - if seed == 0: - seed = random.randint(0, 2147483647) - - generator = torch.Generator('cpu').manual_seed(seed) - - try: - if img is not None: - return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}" - else: - return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}" - except Exception as e: - return None, error_str(e) - -def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed): - - print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}") - - global last_mode - global pipe - global current_model_path - if model_path != current_model_path or last_mode != "txt2img": - current_model_path = model_path - - update_state(f"Loading {current_model.name} text-to-image model...") - - if is_colab or current_model == custom_model: - pipe = StableDiffusionPipeline.from_pretrained( - current_model_path, - torch_dtype=torch.float32, - scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"), - safety_checker=lambda images, clip_input: (images, False) - ) - else: - pipe = StableDiffusionPipeline.from_pretrained( - current_model_path, - torch_dtype=torch.float32, - scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler") - ) - # pipe = pipe.to("cpu") - # pipe = current_model.pipe_t2i - - if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe.enable_xformers_memory_efficient_attention() - last_mode = "txt2img" - - prompt = current_model.prefix + prompt - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_images_per_prompt=n_images, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator, - callback=pipe_callback) - - # update_state(f"Done. 
Seed: {seed}") - - return replace_nsfw_images(result) - -def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed): - - print(f"{datetime.datetime.now()} img_to_img, model: {model_path}") - - global last_mode - global pipe - global current_model_path - if model_path != current_model_path or last_mode != "img2img": - current_model_path = model_path - - update_state(f"Loading {current_model.name} image-to-image model...") - - if is_colab or current_model == custom_model: - pipe = StableDiffusionImg2ImgPipeline.from_pretrained( - current_model_path, - torch_dtype=torch.float32, - scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"), - safety_checker=lambda images, clip_input: (images, False) - ) - else: - pipe = StableDiffusionImg2ImgPipeline.from_pretrained( - current_model_path, - torch_dtype=torch.float32, - scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler") - ) - # pipe = pipe.to("cpu") - # pipe = current_model.pipe_i2i - - if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe.enable_xformers_memory_efficient_attention() - last_mode = "img2img" - - prompt = current_model.prefix + prompt - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_images_per_prompt=n_images, - image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - # width = width, - # height = height, - generator = generator, - callback=pipe_callback) - - # update_state(f"Done. Seed: {seed}") - - return replace_nsfw_images(result) - -def replace_nsfw_images(results): - - if is_colab: - return results.images - - for i in range(len(results.images)): - if results.nsfw_content_detected[i]: - results.images[i] = Image.open("nsfw.png") - return results.images - -# css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -# """ -with gr.Blocks(css="style.css") as demo: - gr.HTML( - f""" -
          -
          -

          Finetuned Diffusion

          -
          -

          - Demo for multiple fine-tuned Stable Diffusion models, trained on different styles:
          - Arcane, Archer, Elden Ring, Spider-Verse, Modern Disney, Classic Disney, Loving Vincent (Van Gogh), Redshift renderer (Cinema4D), Midjourney v4 style, Waifu, Pokémon, Pony Diffusion, Robo Diffusion, Cyberpunk Anime, Tron Legacy, Balloon Art + in colab notebook you can load any other Diffusers 🧨 SD model hosted on HuggingFace 🤗. -

          -

          You can skip the queue and load custom models in the colab: Open In Colab

          - Running on {device}{(" in a Google Colab." if is_colab else "")} -

          -

          You can also duplicate this space and upgrade to gpu by going to settings:
          - Duplicate Space

          -
          - """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name) - with gr.Box(visible=False) as custom_model_group: - custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", interactive=True) - gr.HTML("
          Custom models have to be downloaded first, so give it some time.
          ") - - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - - # image_out = gr.Image(height=512) - gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto") - - state_info = gr.Textbox(label="State", show_label=False, max_lines=2).style(container=False) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - - n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=current_steps, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - if is_colab: - model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False) - custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None) - # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery) - steps.change(on_steps_change, inputs=[steps], outputs=[], queue=False) - - inputs = [model_name, prompt, guidance, steps, n_images, width, height, seed, image, strength, neg_prompt] - outputs = [gallery, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - ex = gr.Examples([ - [models[7].name, "tiny cute and adorable kitten adventurer dressed in a warm overcoat with survival gear on a winters day", 7.5, 25], - [models[4].name, "portrait of dwayne johnson", 7.0, 35], - [models[5].name, "portrait of a beautiful alyx vance half life", 10, 25], - [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 30], - [models[5].name, "fantasy portrait painting, digital art", 4.0, 20], - ], inputs=[model_name, prompt, guidance, steps], outputs=outputs, fn=inference, cache_examples=False) - - gr.HTML(""" -
          -
          -

          Models by @nitrosocke, @haruu1367, @Helixngc7293, @dal_mack, @prompthero and others. ❤️

          -

This space uses the DPM-Solver++ sampler by Cheng Lu, et al.

          -

          Space by:
          - Twitter Follow
          - GitHub followers



          - Buy Me A Coffee

          -

          visitors

          -
          - """) - - demo.load(update_state_info, inputs=state_info, outputs=state_info, every=0.5, show_progress=False) - -print(f"Space built in {time.time() - start_time:.2f} seconds") - -# if not is_colab: -demo.queue(concurrency_count=1) -demo.launch(debug=is_colab, share=is_colab) diff --git a/spaces/hysts/TADNE-interpolation/style.css b/spaces/hysts/TADNE-interpolation/style.css deleted file mode 100644 index 3c8bbe9faf61130e752c100dcf523e3afda611eb..0000000000000000000000000000000000000000 --- a/spaces/hysts/TADNE-interpolation/style.css +++ /dev/null @@ -1,7 +0,0 @@ -h1 { - text-align: center; -} - -#duplicate-button { - margin: auto; -} diff --git a/spaces/iitolstykh/age_gender_estimation_demo/README.md b/spaces/iitolstykh/age_gender_estimation_demo/README.md deleted file mode 100644 index db44246e8caeeabb275f9f184932e234dee8a455..0000000000000000000000000000000000000000 --- a/spaces/iitolstykh/age_gender_estimation_demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Demo -emoji: 🌖 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.37.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/imdebamrita/whatsapp_chat_analysis/README.md b/spaces/imdebamrita/whatsapp_chat_analysis/README.md deleted file mode 100644 index 1f2bec0deaea64fe5c3c810ec576ac261671ed12..0000000000000000000000000000000000000000 --- a/spaces/imdebamrita/whatsapp_chat_analysis/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Whatsapp Chat Analysis -emoji: 🚀 -colorFrom: gray -colorTo: indigo -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/inamXcontru/PoeticTTS/Bhaag Milkha Bhaag Movie Download Kickass 1080p.md b/spaces/inamXcontru/PoeticTTS/Bhaag Milkha Bhaag Movie Download Kickass 1080p.md deleted file mode 100644 index bb177b8f66204e23afd315df9391c8671d66a713..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Bhaag Milkha Bhaag Movie Download Kickass 1080p.md +++ /dev/null @@ -1,7 +0,0 @@ -

          Bhaag Milkha Bhaag Movie Download Kickass 1080p


          Download >>> https://gohhs.com/2uz5xU



          - -Bhaag Milkha Bhaag 2013 movie Download in Hindi 480p, 720p. This Bollywood film directed by Rakesh Omprakash Mehra is based on biography, drama, sports. Download Bhaag Milkha Bhaag (2013) Hindi Full Movie 480p [400MB] | 720p .mp4 Download Bhaag Milkha Bhaag (2013) Hindi Full Movie 720p [500MB] .mkv Download Bhaag Milkha Bhaag (2013) Hindi Full Movie 480p [400MB] .mkv -3D - Movies » Download torrent Year: 2013 Genre: Fantasy, Action, Adventure, Family Director: Rohit Shetty Cast: Sanjay Dutt, Priyanka Chopra, Rahendranath Zutshi, Sanjay Dutt, Kareena Kapoor, Rahul Vohra, Priyana Chopra, Mukesh Rishi, Sheeba Chadha, Anupam Kher Description: Rahul is a street urchin living in a ruthless underworld. 8a78ff9644
          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/3cdaemon Windows 10.epub.md b/spaces/inplisQlawa/anything-midjourney-v4-1/3cdaemon Windows 10.epub.md deleted file mode 100644 index f6a949dcfdd9ad2845013c9fd867a93304670981..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/3cdaemon Windows 10.epub.md +++ /dev/null @@ -1,10 +0,0 @@ -

          3cdaemon Windows 10.epub


          Download File ⚹⚹⚹ https://urlin.us/2uEwom



          - -January 17, 2022 - . Marcos Witt Pdf DownloadMardaani full movie hd 1080p download free movie kickassGattu download in hindi mp43cdaemon Windows 10.epub. Ebook download free download on android phone -Download cartoon in mp4 to your phone or tablet, android without registration! -Watch online cartoon Masha and the Bear, on our website for free and in good quality, if you are already 2 years old and you love cartoons and watch cartoons online and listen to music, then you are here. -Here you can download the cartoon Masha and the Bear in mp4 to your Android phone or tablet without registration and SMS. -Download Cartoon Masha and the Bear Free Mp4 To Phone 8a78ff9644
          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/AMIBCPv45393.md b/spaces/inplisQlawa/anything-midjourney-v4-1/AMIBCPv45393.md deleted file mode 100644 index b5106be7ddbf103d95643f94fbe1f185961feec0..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/AMIBCPv45393.md +++ /dev/null @@ -1,10 +0,0 @@ -
          -

          Namastes the time now, its a marvellous issues support here. Transfusion. none, on WINDOWS. justracinus 57e404fda0 https://coub.com/stories/4255248-full-version-amibcpv45393-rar-utorrent-full-version-files.

          -

          dvkecky wo2211b2bcf https://coub.com/stories/4255248-full-version-amibcpv45393-rar-utorrent-full-version-files. , Vidalia. A patch with all the updates. Here are the software, which solves all your problems related to Windows 10/8/7/vista operating system (64-bit). Release date: 26/11/2013, size: 63.92 Mb, checksum: 81bdd09931ed1b74c5bdee85b1bea93c. I had a dozen or so issues, I had real the same problem, aske me to create a support ticket, my download speed is really slow, when I download there's a massive leak!

          -

          AMIBCPv45393


          Download Zip ✪✪✪ https://urlin.us/2uEwwI



          -

          wpadmin ab0026619 https://coub.com/stories/4255248-full-version-amibcpv45393-rar-utorrent-full-version-files. ps alkmedob 975a64875 https://www.sade.com/sade-latest-version-in-latest-september-september-2019-for-windows-10-8-7-8-and-vista-32-bit-full-version-v2/ fp. https://superagr.com/help/how-do-i-stream-tv-with-a-raspberry-pi-2-b-usb-hdmi-cable-hdtv-senders-mice-keyboards-and-more-1080p/ Name: AMD R5 Series Radeon Instinct Product ID (VID): 0x0005 USB Device ID: 0x1004 VID: 0x1043 PRODUCT_NAME: AMD Polaris 1

          -

          ramecho 491c2f320e https://coub.com/stories/4240292-full-version-amibcpv45393-rar-utorrent-x32-pc-nulled-key. I'm a fan of Master Collection Windows 8 Keycrack. manuel 444a689b8d https://coub.com/stories/4641233-amibcpv45393-master-collection-star-trek-set-w-instal-amdware-windows-install-w7-r. Please try again later.

          -

marek 8bc4429cbe https://coub.com/stories/4641233-amibcpv45393-master-collection-star-trek-set-w-instal-amdware-windows-install-w7-r. Download the full version of the CS6 Master Collection for free.

          -

          899543212b
          -
          -
          \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Dilwale Dulhania Le Jayenge 1995 Hindi BRRip 720p X264 AAC 51Hon3y !NEW!.md b/spaces/inreVtussa/clothingai/Examples/Dilwale Dulhania Le Jayenge 1995 Hindi BRRip 720p X264 AAC 51Hon3y !NEW!.md deleted file mode 100644 index 684eb4a89c30a6ba25a6bc0e2047091478bb0934..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Dilwale Dulhania Le Jayenge 1995 Hindi BRRip 720p X264 AAC 51Hon3y !NEW!.md +++ /dev/null @@ -1,10 +0,0 @@ -

          Dilwale Dulhania Le Jayenge 1995 Hindi BRRip 720p X264 AAC 51Hon3y


          DOWNLOADhttps://tiurll.com/2uCjXK



- -Dilwale.Dulhania.Le.Jayenge.1995.Hindi.1080p.Blu-Ray.x264.DD.5.1 MSubs-HDSector: English subtitles added, available for download as an srt file. Download free movies to your phone in mp4 and 3gp format for Android, smartphone or tablet. -Movies on your phone in high quality. -Download free movies to your phone. -Watch movies online, watch movies online for free, 2013 movies online. -Download 3gp and mp4 movies to your phone, smartphone, PDA, Android, iPhone, or iPad for free without registration on the site www.kinona. 8a78ff9644
          -
          -
          -

          diff --git a/spaces/ivanlau/IntelliLabel/app.py b/spaces/ivanlau/IntelliLabel/app.py deleted file mode 100644 index 4decbfefc3ab4937eb28132263064ecc9c6a294a..0000000000000000000000000000000000000000 --- a/spaces/ivanlau/IntelliLabel/app.py +++ /dev/null @@ -1,89 +0,0 @@ -from transformers import AutoTokenizer, AutoModelForSequenceClassification -import neattext.functions as nfx -import re -import torch -import streamlit as st - -# labels -labels = [ - 'bug', - 'enhancement', - 'question' -] - -# Model path -# LOCAL -# MODEL_DIR = "./model/distil-bert-uncased-finetuned-github-issues/" - -# REMOTE -MODEL_DIR = "ivanlau/distil-bert-uncased-finetuned-github-issues" - - -@st.cache(allow_output_mutation=True, show_spinner=False) -def load_model(): - model = AutoModelForSequenceClassification.from_pretrained(MODEL_DIR) - tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR) - return model, tokenizer - -# Helpers -reg_obj = re.compile(r'[^\u0000-\u007F]+', re.UNICODE) -def is_english_text(text): - return (False if reg_obj.match(text) else True) - -# remove the stopwords, emojis from the text and convert it into lower case -def neatify_text(text): - text = str(text).lower() - text = nfx.remove_stopwords(text) - text = nfx.remove_emojis(text) - return text - - - -def main(): - # st UI setting - st.set_page_config( - page_title="IntelliLabel", - page_icon="🏷", - layout="centered", - initial_sidebar_state="auto", - ) - st.title("IntelliLabel") - st.write("IntelliLabel is a github issue classification app. It classifies issue into 3 categories (Bug, Enhancement, Question).") - - # load model - with st.spinner("Downloading model (takes ~1 min)"): - model, tokenizer = load_model() - - - - default_text = "Unable to run Speech2Text example in documentation" - - text = st.text_area('Enter text here:', value=default_text) - submit = st.button('Predict 🏷') - - - if submit: - text = text.strip(" \n\t") - if is_english_text(text): - text = neatify_text(text) - tokenized_sentence = tokenizer(text, return_tensors='pt') - output = model(**tokenized_sentence) - predictions = torch.nn.functional.softmax(output.logits, dim=-1) - _, preds = torch.max(predictions, dim=-1) - predicted = labels[preds.item()] - - predictions = predictions.tolist()[0] - c1, c2, c3 = st.columns(3) - c1.metric(label="Bug", value=round(predictions[0],3)) - c2.metric(label="Enhancement", value=round(predictions[1],3)) - c3.metric(label="Question", value=round(predictions[2],3)) - - st.info("Prediction") - st.write(predicted.capitalize()) - - else: - st.error(str("Please input english text.")) - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/jaybeeja/age_predictor/README.md b/spaces/jaybeeja/age_predictor/README.md deleted file mode 100644 index 43ba152967e05eef8b562eacb4638f7683a0cca9..0000000000000000000000000000000000000000 --- a/spaces/jaybeeja/age_predictor/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Age Predictor -emoji: 🏃 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jb2k/bert-base-multilingual-cased-language-detection/app.py b/spaces/jb2k/bert-base-multilingual-cased-language-detection/app.py deleted file mode 100644 index 428d08a20c0a43daa325b94118bca93851fd93c0..0000000000000000000000000000000000000000 --- a/spaces/jb2k/bert-base-multilingual-cased-language-detection/app.py +++ 
/dev/null @@ -1,77 +0,0 @@ -from transformers import AutoModelForSequenceClassification, AutoTokenizer -import torch -import gradio as gr - -model_path = "jb2k/bert-base-multilingual-cased-language-detection" - -model = AutoModelForSequenceClassification.from_pretrained(model_path) -tokenizer = AutoTokenizer.from_pretrained(model_path) - -language_dict = {0: 'Arabic', - 1: 'Basque', - 2: 'Breton', - 3: 'Catalan', - 4: 'Chinese_China', - 5: 'Chinese_Hongkong', - 6: 'Chinese_Taiwan', - 7: 'Chuvash', - 8: 'Czech', - 9: 'Dhivehi', - 10: 'Dutch', - 11: 'English', - 12: 'Esperanto', - 13: 'Estonian', - 14: 'French', - 15: 'Frisian', - 16: 'Georgian', - 17: 'German', - 18: 'Greek', - 19: 'Hakha_Chin', - 20: 'Indonesian', - 21: 'Interlingua', - 22: 'Italian', - 23: 'Japanese', - 24: 'Kabyle', - 25: 'Kinyarwanda', - 26: 'Kyrgyz', - 27: 'Latvian', - 28: 'Maltese', - 29: 'Mongolian', - 30: 'Persian', - 31: 'Polish', - 32: 'Portuguese', - 33: 'Romanian', - 34: 'Romansh_Sursilvan', - 35: 'Russian', - 36: 'Sakha', - 37: 'Slovenian', - 38: 'Spanish', - 39: 'Swedish', - 40: 'Tamil', - 41: 'Tatar', - 42: 'Turkish', - 43: 'Ukranian', - 44: 'Welsh'} - -examples = ['Transformers are really cool!', 'Трансформеры действительно классные!', '¡Los transformadores son realmente geniales!'] - -def inference(sentence): - tokenized_sentence = tokenizer(sentence, return_tensors='pt') - output = model(**tokenized_sentence) - predictions = torch.nn.functional.softmax(output.logits, dim=-1) - certainy, highest_value = torch.max(predictions, dim=-1, keepdim=False, out=None) - highest_value_int = highest_value.item() - language = language_dict[highest_value_int] - return language - -if __name__ == '__main__': - interFace = gr.Interface(fn=inference, - inputs=gr.inputs.Textbox(placeholder="Enter text here", label="Text content", lines=5), - outputs=gr.outputs.Label(num_top_classes=6, label="Language of this text is "), - verbose=True, - examples = examples, - title="Language Detector", - description="Language detector with support for 45 languages. 
Created as part of the huggingface course community event.", - theme="grass") - interFace.launch() - diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/components/icons/hugging-clap.tsx b/spaces/jbilcke-hf/ai-clip-factory/src/components/icons/hugging-clap.tsx deleted file mode 100644 index ffb37ae6183cd8ce7fe7c212e383a6510eba2485..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-clip-factory/src/components/icons/hugging-clap.tsx +++ /dev/null @@ -1,8 +0,0 @@ -export function HuggingClap() { - return ( - - ) -} \ No newline at end of file diff --git a/spaces/jbilcke-hf/observer/README.md b/spaces/jbilcke-hf/observer/README.md deleted file mode 100644 index a796f95336aabec3fc567f31b9ba34e122f3407b..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/observer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Observer -emoji: 🐶🎥 -colorFrom: red -colorTo: yellow -sdk: docker -pinned: true -app_port: 3000 ---- - -Your webcam, sent to Idefics every ~12-15 sec, then interpreted by Llama-2 👀 - -So it's an agent that can look at things (but not do much) diff --git a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/dataset_mappers/mask_former_semantic_dataset_mapper.py b/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/dataset_mappers/mask_former_semantic_dataset_mapper.py deleted file mode 100644 index 2836579942cf91c726cb34cbbd2d137c975bee37..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/dataset_mappers/mask_former_semantic_dataset_mapper.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Copyright (c) Meta Platforms, Inc. All Rights Reserved - -import copy -import logging - -import numpy as np -import torch -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.data import MetadataCatalog -from detectron2.data import detection_utils as utils -from detectron2.data import transforms as T -from detectron2.projects.point_rend import ColorAugSSDTransform -from detectron2.structures import BitMasks, Instances - -__all__ = ["MaskFormerSemanticDatasetMapper"] - - -class MaskFormerSemanticDatasetMapper: - """ - A callable which takes a dataset dict in Detectron2 Dataset format, - and map it into a format used by MaskFormer for semantic segmentation. - - The callable currently does the following: - - 1. Read the image from "file_name" - 2. Applies geometric transforms to the image and annotation - 3. Find and applies suitable cropping to the image and annotation - 4. Prepare image and annotation to Tensors - """ - - @configurable - def __init__( - self, - is_train=True, - *, - augmentations, - image_format, - ignore_label, - size_divisibility, - ): - """ - NOTE: this interface is experimental. - Args: - is_train: for training or inference - augmentations: a list of augmentations or deterministic transforms to apply - image_format: an image format supported by :func:`detection_utils.read_image`. 
- ignore_label: the label that is ignored to evaluation - size_divisibility: pad image size to be divisible by this value - """ - self.is_train = is_train - self.tfm_gens = augmentations - self.img_format = image_format - self.ignore_label = ignore_label - self.size_divisibility = size_divisibility - - logger = logging.getLogger(__name__) - mode = "training" if is_train else "inference" - logger.info( - f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}" - ) - - @classmethod - def from_config(cls, cfg, is_train=True): - # Build augmentation - if is_train: - augs = [ - T.ResizeShortestEdge( - cfg.INPUT.MIN_SIZE_TRAIN, - cfg.INPUT.MAX_SIZE_TRAIN, - cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING, - ) - ] - if cfg.INPUT.CROP.ENABLED: - augs.append( - T.RandomCrop_CategoryAreaConstraint( - cfg.INPUT.CROP.TYPE, - cfg.INPUT.CROP.SIZE, - cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA, - cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, - ) - ) - if cfg.INPUT.COLOR_AUG_SSD: - augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT)) - augs.append(T.RandomFlip()) - - # Assume always applies to the training set. - dataset_names = cfg.DATASETS.TRAIN - else: - min_size = cfg.INPUT.MIN_SIZE_TEST - max_size = cfg.INPUT.MAX_SIZE_TEST - sample_style = "choice" - augs = [T.ResizeShortestEdge(min_size, max_size, sample_style)] - dataset_names = cfg.DATASETS.TEST - meta = MetadataCatalog.get(dataset_names[0]) - ignore_label = meta.ignore_label - - ret = { - "is_train": is_train, - "augmentations": augs, - "image_format": cfg.INPUT.FORMAT, - "ignore_label": ignore_label, - "size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY if is_train else -1, - } - return ret - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. - - Returns: - dict: a format that builtin models in detectron2 accept - """ - # assert self.is_train, "MaskFormerSemanticDatasetMapper should only be used for training!" - - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - image = utils.read_image(dataset_dict["file_name"], format=self.img_format) - utils.check_image_size(dataset_dict, image) - - if "sem_seg_file_name" in dataset_dict: - # PyTorch transformation not implemented for uint16, so converting it to double first - sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype( - "double" - ) - else: - sem_seg_gt = None - - if sem_seg_gt is None: - raise ValueError( - "Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.".format( - dataset_dict["file_name"] - ) - ) - - aug_input = T.AugInput(image, sem_seg=sem_seg_gt) - aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input) - image = aug_input.image - sem_seg_gt = aug_input.sem_seg - - # Pad image and segmentation label here! 
- image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - if sem_seg_gt is not None: - sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long")) - - if self.size_divisibility > 0: - image_size = (image.shape[-2], image.shape[-1]) - padding_size = [ - 0, - self.size_divisibility - image_size[1], - 0, - self.size_divisibility - image_size[0], - ] - image = F.pad(image, padding_size, value=128).contiguous() - if sem_seg_gt is not None: - sem_seg_gt = F.pad( - sem_seg_gt, padding_size, value=self.ignore_label - ).contiguous() - - image_shape = (image.shape[-2], image.shape[-1]) # h, w - - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = image - - if sem_seg_gt is not None: - dataset_dict["sem_seg"] = sem_seg_gt.long() - - if "annotations" in dataset_dict: - raise ValueError( - "Semantic segmentation dataset should not have 'annotations'." - ) - - # Prepare per-category binary masks - if sem_seg_gt is not None: - sem_seg_gt = sem_seg_gt.numpy() - instances = Instances(image_shape) - classes = np.unique(sem_seg_gt) - # remove ignored region - classes = classes[classes != self.ignore_label] - instances.gt_classes = torch.tensor(classes, dtype=torch.int64) - - masks = [] - for class_id in classes: - masks.append(sem_seg_gt == class_id) - - if len(masks) == 0: - # Some image does not have annotation (all ignored) - instances.gt_masks = torch.zeros( - (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]) - ) - else: - masks = BitMasks( - torch.stack( - [ - torch.from_numpy(np.ascontiguousarray(x.copy())) - for x in masks - ] - ) - ) - instances.gt_masks = masks.tensor - - dataset_dict["instances"] = instances - - return dataset_dict diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Signature/PKCS1_PSS.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Signature/PKCS1_PSS.py deleted file mode 100644 index c39d3881630e647cf67b28ee86f3de47cce193a3..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Signature/PKCS1_PSS.py +++ /dev/null @@ -1,55 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2014, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -""" -Legacy module for PKCS#1 PSS signatures. - -:undocumented: __package__ -""" - -import types - -from Crypto.Signature import pss - - -def _pycrypto_verify(self, hash_object, signature): - try: - self._verify(hash_object, signature) - except (ValueError, TypeError): - return False - return True - - -def new(rsa_key, mgfunc=None, saltLen=None, randfunc=None): - pkcs1 = pss.new(rsa_key, mask_func=mgfunc, - salt_bytes=saltLen, rand_func=randfunc) - pkcs1._verify = pkcs1.verify - pkcs1.verify = types.MethodType(_pycrypto_verify, pkcs1) - return pkcs1 diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Util/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Util/__init__.py deleted file mode 100644 index f12214d3044998bb644b8323984cb6197e1fbd93..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Util/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# =================================================================== - -"""Miscellaneous modules - -Contains useful modules that don't belong into any of the -other Crypto.* subpackages. - -======================== ============================================= -Module Description -======================== ============================================= -`Crypto.Util.number` Number-theoretic functions (primality testing, etc.) -`Crypto.Util.Counter` Fast counter functions for CTR cipher modes. -`Crypto.Util.RFC1751` Converts between 128-bit keys and human-readable - strings of words. -`Crypto.Util.asn1` Minimal support for ASN.1 DER encoding -`Crypto.Util.Padding` Set of functions for adding and removing padding. 
-======================== ============================================= - -:undocumented: _galois, _number_new, cpuid, py3compat, _raw_api -""" - -__all__ = ['RFC1751', 'number', 'strxor', 'asn1', 'Counter', 'Padding'] - diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageChops.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageChops.py deleted file mode 100644 index 70120031797c2493c0ce878c13c3fd3d5554c354..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageChops.py +++ /dev/null @@ -1,303 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# standard channel operations -# -# History: -# 1996-03-24 fl Created -# 1996-08-13 fl Added logical operations (for "1" images) -# 2000-10-12 fl Added offset method (from Image.py) -# -# Copyright (c) 1997-2000 by Secret Labs AB -# Copyright (c) 1996-2000 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -from . import Image - - -def constant(image, value): - """Fill a channel with a given grey level. - - :rtype: :py:class:`~PIL.Image.Image` - """ - - return Image.new("L", image.size, value) - - -def duplicate(image): - """Copy a channel. Alias for :py:meth:`PIL.Image.Image.copy`. - - :rtype: :py:class:`~PIL.Image.Image` - """ - - return image.copy() - - -def invert(image): - """ - Invert an image (channel). :: - - out = MAX - image - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image.load() - return image._new(image.im.chop_invert()) - - -def lighter(image1, image2): - """ - Compares the two images, pixel by pixel, and returns a new image containing - the lighter values. :: - - out = max(image1, image2) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_lighter(image2.im)) - - -def darker(image1, image2): - """ - Compares the two images, pixel by pixel, and returns a new image containing - the darker values. :: - - out = min(image1, image2) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_darker(image2.im)) - - -def difference(image1, image2): - """ - Returns the absolute value of the pixel-by-pixel difference between the two - images. :: - - out = abs(image1 - image2) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_difference(image2.im)) - - -def multiply(image1, image2): - """ - Superimposes two images on top of each other. - - If you multiply an image with a solid black image, the result is black. If - you multiply with a solid white image, the image is unaffected. :: - - out = image1 * image2 / MAX - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_multiply(image2.im)) - - -def screen(image1, image2): - """ - Superimposes two inverted images on top of each other. 
:: - - out = MAX - ((MAX - image1) * (MAX - image2) / MAX) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_screen(image2.im)) - - -def soft_light(image1, image2): - """ - Superimposes two images on top of each other using the Soft Light algorithm - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_soft_light(image2.im)) - - -def hard_light(image1, image2): - """ - Superimposes two images on top of each other using the Hard Light algorithm - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_hard_light(image2.im)) - - -def overlay(image1, image2): - """ - Superimposes two images on top of each other using the Overlay algorithm - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_overlay(image2.im)) - - -def add(image1, image2, scale=1.0, offset=0): - """ - Adds two images, dividing the result by scale and adding the - offset. If omitted, scale defaults to 1.0, and offset to 0.0. :: - - out = ((image1 + image2) / scale + offset) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_add(image2.im, scale, offset)) - - -def subtract(image1, image2, scale=1.0, offset=0): - """ - Subtracts two images, dividing the result by scale and adding the offset. - If omitted, scale defaults to 1.0, and offset to 0.0. :: - - out = ((image1 - image2) / scale + offset) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_subtract(image2.im, scale, offset)) - - -def add_modulo(image1, image2): - """Add two images, without clipping the result. :: - - out = ((image1 + image2) % MAX) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_add_modulo(image2.im)) - - -def subtract_modulo(image1, image2): - """Subtract two images, without clipping the result. :: - - out = ((image1 - image2) % MAX) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_subtract_modulo(image2.im)) - - -def logical_and(image1, image2): - """Logical AND between two images. - - Both of the images must have mode "1". If you would like to perform a - logical AND on an image with a mode other than "1", try - :py:meth:`~PIL.ImageChops.multiply` instead, using a black-and-white mask - as the second image. :: - - out = ((image1 and image2) % MAX) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_and(image2.im)) - - -def logical_or(image1, image2): - """Logical OR between two images. - - Both of the images must have mode "1". :: - - out = ((image1 or image2) % MAX) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_or(image2.im)) - - -def logical_xor(image1, image2): - """Logical XOR between two images. - - Both of the images must have mode "1". :: - - out = ((bool(image1) != bool(image2)) % MAX) - - :rtype: :py:class:`~PIL.Image.Image` - """ - - image1.load() - image2.load() - return image1._new(image1.im.chop_xor(image2.im)) - - -def blend(image1, image2, alpha): - """Blend images using constant transparency weight. Alias for - :py:func:`PIL.Image.blend`. 
- - :rtype: :py:class:`~PIL.Image.Image` - """ - - return Image.blend(image1, image2, alpha) - - -def composite(image1, image2, mask): - """Create composite using transparency mask. Alias for - :py:func:`PIL.Image.composite`. - - :rtype: :py:class:`~PIL.Image.Image` - """ - - return Image.composite(image1, image2, mask) - - -def offset(image, xoffset, yoffset=None): - """Returns a copy of the image where data has been offset by the given - distances. Data wraps around the edges. If ``yoffset`` is omitted, it - is assumed to be equal to ``xoffset``. - - :param image: Input image. - :param xoffset: The horizontal distance. - :param yoffset: The vertical distance. If omitted, both - distances are set to the same value. - :rtype: :py:class:`~PIL.Image.Image` - """ - - if yoffset is None: - yoffset = xoffset - image.load() - return image._new(image.im.offset(xoffset, yoffset)) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/time64.c b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/time64.c deleted file mode 100644 index a21fbb90bd6f86b7363603c403357d3e87192d14..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/time64.c +++ /dev/null @@ -1,781 +0,0 @@ -/* - -Copyright (c) 2007-2010 Michael G Schwern - -This software originally derived from Paul Sheer's pivotal_gmtime_r.c. - -The MIT License: - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -*/ - -/* - -Programmers who have available to them 64-bit time values as a 'long -long' type can use cbson_localtime64_r() and cbson_gmtime64_r() which correctly -converts the time even on 32-bit systems. Whether you have 64-bit time -values will depend on the operating system. - -cbson_localtime64_r() is a 64-bit equivalent of localtime_r(). - -cbson_gmtime64_r() is a 64-bit equivalent of gmtime_r(). - -*/ - -#ifdef _MSC_VER - #define _CRT_SECURE_NO_WARNINGS -#endif - -/* Including Python.h fixes issues with interpreters built with -std=c99. */ -#define PY_SSIZE_T_CLEAN -#include "Python.h" - -#include -#include "time64.h" -#include "time64_limits.h" - - -/* Spec says except for stftime() and the _r() functions, these - all return static memory. Stabbings! 
*/ -static struct TM Static_Return_Date; - -static const int days_in_month[2][12] = { - {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, - {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, -}; - -static const int julian_days_by_month[2][12] = { - {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334}, - {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335}, -}; - -static const int length_of_year[2] = { 365, 366 }; - -/* Some numbers relating to the gregorian cycle */ -static const Year years_in_gregorian_cycle = 400; -#define days_in_gregorian_cycle ((365 * 400) + 100 - 4 + 1) -static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL; - -/* Year range we can trust the time functions with */ -#define MAX_SAFE_YEAR 2037 -#define MIN_SAFE_YEAR 1971 - -/* 28 year Julian calendar cycle */ -#define SOLAR_CYCLE_LENGTH 28 - -/* Year cycle from MAX_SAFE_YEAR down. */ -static const int safe_years_high[SOLAR_CYCLE_LENGTH] = { - 2016, 2017, 2018, 2019, - 2020, 2021, 2022, 2023, - 2024, 2025, 2026, 2027, - 2028, 2029, 2030, 2031, - 2032, 2033, 2034, 2035, - 2036, 2037, 2010, 2011, - 2012, 2013, 2014, 2015 -}; - -/* Year cycle from MIN_SAFE_YEAR up */ -static const int safe_years_low[SOLAR_CYCLE_LENGTH] = { - 1996, 1997, 1998, 1971, - 1972, 1973, 1974, 1975, - 1976, 1977, 1978, 1979, - 1980, 1981, 1982, 1983, - 1984, 1985, 1986, 1987, - 1988, 1989, 1990, 1991, - 1992, 1993, 1994, 1995, -}; - -/* Let's assume people are going to be looking for dates in the future. - Let's provide some cheats so you can skip ahead. - This has a 4x speed boost when near 2008. -*/ -/* Number of days since epoch on Jan 1st, 2008 GMT */ -#define CHEAT_DAYS (1199145600 / 24 / 60 / 60) -#define CHEAT_YEARS 108 - -#define IS_LEAP(n) ((!(((n) + 1900) % 400) || (!(((n) + 1900) % 4) && (((n) + 1900) % 100))) != 0) -#define _TIME64_WRAP(a,b,m) ((a) = ((a) < 0 ) ? ((b)--, (a) + (m)) : (a)) - -#ifdef USE_SYSTEM_LOCALTIME -# define SHOULD_USE_SYSTEM_LOCALTIME(a) ( \ - (a) <= SYSTEM_LOCALTIME_MAX && \ - (a) >= SYSTEM_LOCALTIME_MIN \ -) -#else -# define SHOULD_USE_SYSTEM_LOCALTIME(a) (0) -#endif - -#ifdef USE_SYSTEM_GMTIME -# define SHOULD_USE_SYSTEM_GMTIME(a) ( \ - (a) <= SYSTEM_GMTIME_MAX && \ - (a) >= SYSTEM_GMTIME_MIN \ -) -#else -# define SHOULD_USE_SYSTEM_GMTIME(a) (0) -#endif - -/* Multi varadic macros are a C99 thing, alas */ -#ifdef TIME_64_DEBUG -# define TIME64_TRACE(format) (fprintf(stderr, format)) -# define TIME64_TRACE1(format, var1) (fprintf(stderr, format, var1)) -# define TIME64_TRACE2(format, var1, var2) (fprintf(stderr, format, var1, var2)) -# define TIME64_TRACE3(format, var1, var2, var3) (fprintf(stderr, format, var1, var2, var3)) -#else -# define TIME64_TRACE(format) ((void)0) -# define TIME64_TRACE1(format, var1) ((void)0) -# define TIME64_TRACE2(format, var1, var2) ((void)0) -# define TIME64_TRACE3(format, var1, var2, var3) ((void)0) -#endif - - -static int is_exception_century(Year year) -{ - int is_exception = ((year % 100 == 0) && !(year % 400 == 0)); - TIME64_TRACE1("# is_exception_century: %s\n", is_exception ? "yes" : "no"); - - return(is_exception); -} - - -/* Compare two dates. - The result is like cmp. 
- Ignores things like gmtoffset and dst -*/ -int cbson_cmp_date( const struct TM* left, const struct tm* right ) { - if( left->tm_year > right->tm_year ) - return 1; - else if( left->tm_year < right->tm_year ) - return -1; - - if( left->tm_mon > right->tm_mon ) - return 1; - else if( left->tm_mon < right->tm_mon ) - return -1; - - if( left->tm_mday > right->tm_mday ) - return 1; - else if( left->tm_mday < right->tm_mday ) - return -1; - - if( left->tm_hour > right->tm_hour ) - return 1; - else if( left->tm_hour < right->tm_hour ) - return -1; - - if( left->tm_min > right->tm_min ) - return 1; - else if( left->tm_min < right->tm_min ) - return -1; - - if( left->tm_sec > right->tm_sec ) - return 1; - else if( left->tm_sec < right->tm_sec ) - return -1; - - return 0; -} - - -/* Check if a date is safely inside a range. - The intention is to check if its a few days inside. -*/ -int cbson_date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { - if( cbson_cmp_date(date, min) == -1 ) - return 0; - - if( cbson_cmp_date(date, max) == 1 ) - return 0; - - return 1; -} - - -/* timegm() is not in the C or POSIX spec, but it is such a useful - extension I would be remiss in leaving it out. Also I need it - for cbson_localtime64() -*/ -Time64_T cbson_timegm64(const struct TM *date) { - Time64_T days = 0; - Time64_T seconds = 0; - Year year; - Year orig_year = (Year)date->tm_year; - int cycles = 0; - - if( orig_year > 100 ) { - cycles = (int)((orig_year - 100) / 400); - orig_year -= cycles * 400; - days += (Time64_T)cycles * days_in_gregorian_cycle; - } - else if( orig_year < -300 ) { - cycles = (int)((orig_year - 100) / 400); - orig_year -= cycles * 400; - days += (Time64_T)cycles * days_in_gregorian_cycle; - } - TIME64_TRACE3("# timegm/ cycles: %d, days: %lld, orig_year: %lld\n", cycles, days, orig_year); - - if( orig_year > 70 ) { - year = 70; - while( year < orig_year ) { - days += length_of_year[IS_LEAP(year)]; - year++; - } - } - else if ( orig_year < 70 ) { - year = 69; - do { - days -= length_of_year[IS_LEAP(year)]; - year--; - } while( year >= orig_year ); - } - - days += julian_days_by_month[IS_LEAP(orig_year)][date->tm_mon]; - days += date->tm_mday - 1; - - seconds = days * 60 * 60 * 24; - - seconds += date->tm_hour * 60 * 60; - seconds += date->tm_min * 60; - seconds += date->tm_sec; - - return(seconds); -} - - -#ifndef NDEBUG -static int check_tm(struct TM *tm) -{ - /* Don't forget leap seconds */ - assert(tm->tm_sec >= 0); - assert(tm->tm_sec <= 61); - - assert(tm->tm_min >= 0); - assert(tm->tm_min <= 59); - - assert(tm->tm_hour >= 0); - assert(tm->tm_hour <= 23); - - assert(tm->tm_mday >= 1); - assert(tm->tm_mday <= days_in_month[IS_LEAP(tm->tm_year)][tm->tm_mon]); - - assert(tm->tm_mon >= 0); - assert(tm->tm_mon <= 11); - - assert(tm->tm_wday >= 0); - assert(tm->tm_wday <= 6); - - assert(tm->tm_yday >= 0); - assert(tm->tm_yday <= length_of_year[IS_LEAP(tm->tm_year)]); - -#ifdef HAS_TM_TM_GMTOFF - assert(tm->tm_gmtoff >= -24 * 60 * 60); - assert(tm->tm_gmtoff <= 24 * 60 * 60); -#endif - - return 1; -} -#endif - - -/* The exceptional centuries without leap years cause the cycle to - shift by 16 -*/ -static Year cycle_offset(Year year) -{ - const Year start_year = 2000; - Year year_diff = year - start_year; - Year exceptions; - - if( year > start_year ) - year_diff--; - - exceptions = year_diff / 100; - exceptions -= year_diff / 400; - - TIME64_TRACE3("# year: %lld, exceptions: %lld, year_diff: %lld\n", - year, exceptions, year_diff); - - return exceptions * 
16; -} - -/* For a given year after 2038, pick the latest possible matching - year in the 28 year calendar cycle. - - A matching year... - 1) Starts on the same day of the week. - 2) Has the same leap year status. - - This is so the calendars match up. - - Also the previous year must match. When doing Jan 1st you might - wind up on Dec 31st the previous year when doing a -UTC time zone. - - Finally, the next year must have the same start day of week. This - is for Dec 31st with a +UTC time zone. - It doesn't need the same leap year status since we only care about - January 1st. -*/ -static int safe_year(const Year year) -{ - int safe_year = 0; - Year year_cycle; - - if( year >= MIN_SAFE_YEAR && year <= MAX_SAFE_YEAR ) { - return (int)year; - } - - year_cycle = year + cycle_offset(year); - - /* safe_years_low is off from safe_years_high by 8 years */ - if( year < MIN_SAFE_YEAR ) - year_cycle -= 8; - - /* Change non-leap xx00 years to an equivalent */ - if( is_exception_century(year) ) - year_cycle += 11; - - /* Also xx01 years, since the previous year will be wrong */ - if( is_exception_century(year - 1) ) - year_cycle += 17; - - year_cycle %= SOLAR_CYCLE_LENGTH; - if( year_cycle < 0 ) - year_cycle = SOLAR_CYCLE_LENGTH + year_cycle; - - assert( year_cycle >= 0 ); - assert( year_cycle < SOLAR_CYCLE_LENGTH ); - if( year < MIN_SAFE_YEAR ) - safe_year = safe_years_low[year_cycle]; - else if( year > MAX_SAFE_YEAR ) - safe_year = safe_years_high[year_cycle]; - else - assert(0); - - TIME64_TRACE3("# year: %lld, year_cycle: %lld, safe_year: %d\n", - year, year_cycle, safe_year); - - assert(safe_year <= MAX_SAFE_YEAR && safe_year >= MIN_SAFE_YEAR); - - return safe_year; -} - - -void pymongo_copy_tm_to_TM64(const struct tm *src, struct TM *dest) { - if( src == NULL ) { - memset(dest, 0, sizeof(*dest)); - } - else { -# ifdef USE_TM64 - dest->tm_sec = src->tm_sec; - dest->tm_min = src->tm_min; - dest->tm_hour = src->tm_hour; - dest->tm_mday = src->tm_mday; - dest->tm_mon = src->tm_mon; - dest->tm_year = (Year)src->tm_year; - dest->tm_wday = src->tm_wday; - dest->tm_yday = src->tm_yday; - dest->tm_isdst = src->tm_isdst; - -# ifdef HAS_TM_TM_GMTOFF - dest->tm_gmtoff = src->tm_gmtoff; -# endif - -# ifdef HAS_TM_TM_ZONE - dest->tm_zone = src->tm_zone; -# endif - -# else - /* They're the same type */ - memcpy(dest, src, sizeof(*dest)); -# endif - } -} - - -void cbson_copy_TM64_to_tm(const struct TM *src, struct tm *dest) { - if( src == NULL ) { - memset(dest, 0, sizeof(*dest)); - } - else { -# ifdef USE_TM64 - dest->tm_sec = src->tm_sec; - dest->tm_min = src->tm_min; - dest->tm_hour = src->tm_hour; - dest->tm_mday = src->tm_mday; - dest->tm_mon = src->tm_mon; - dest->tm_year = (int)src->tm_year; - dest->tm_wday = src->tm_wday; - dest->tm_yday = src->tm_yday; - dest->tm_isdst = src->tm_isdst; - -# ifdef HAS_TM_TM_GMTOFF - dest->tm_gmtoff = src->tm_gmtoff; -# endif - -# ifdef HAS_TM_TM_ZONE - dest->tm_zone = src->tm_zone; -# endif - -# else - /* They're the same type */ - memcpy(dest, src, sizeof(*dest)); -# endif - } -} - - -/* Simulate localtime_r() to the best of our ability */ -struct tm * cbson_fake_localtime_r(const time_t *time, struct tm *result) { - const struct tm *static_result = localtime(time); - - assert(result != NULL); - - if( static_result == NULL ) { - memset(result, 0, sizeof(*result)); - return NULL; - } - else { - memcpy(result, static_result, sizeof(*result)); - return result; - } -} - - -/* Simulate gmtime_r() to the best of our ability */ -struct tm * cbson_fake_gmtime_r(const time_t 
*time, struct tm *result) { - const struct tm *static_result = gmtime(time); - - assert(result != NULL); - - if( static_result == NULL ) { - memset(result, 0, sizeof(*result)); - return NULL; - } - else { - memcpy(result, static_result, sizeof(*result)); - return result; - } -} - - -static Time64_T seconds_between_years(Year left_year, Year right_year) { - int increment = (left_year > right_year) ? 1 : -1; - Time64_T seconds = 0; - int cycles; - - if( left_year > 2400 ) { - cycles = (int)((left_year - 2400) / 400); - left_year -= cycles * 400; - seconds += cycles * seconds_in_gregorian_cycle; - } - else if( left_year < 1600 ) { - cycles = (int)((left_year - 1600) / 400); - left_year += cycles * 400; - seconds += cycles * seconds_in_gregorian_cycle; - } - - while( left_year != right_year ) { - seconds += length_of_year[IS_LEAP(right_year - 1900)] * 60 * 60 * 24; - right_year += increment; - } - - return seconds * increment; -} - - -Time64_T cbson_mktime64(const struct TM *input_date) { - struct tm safe_date; - struct TM date; - Time64_T time; - Year year = input_date->tm_year + 1900; - - if( cbson_date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) - { - cbson_copy_TM64_to_tm(input_date, &safe_date); - return (Time64_T)mktime(&safe_date); - } - - /* Have to make the year safe in date else it won't fit in safe_date */ - date = *input_date; - date.tm_year = safe_year(year) - 1900; - cbson_copy_TM64_to_tm(&date, &safe_date); - - time = (Time64_T)mktime(&safe_date); - - time += seconds_between_years(year, (Year)(safe_date.tm_year + 1900)); - - return time; -} - - -/* Because I think mktime() is a crappy name */ -Time64_T timelocal64(const struct TM *date) { - return cbson_mktime64(date); -} - - -struct TM *cbson_gmtime64_r (const Time64_T *in_time, struct TM *p) -{ - int v_tm_sec, v_tm_min, v_tm_hour, v_tm_mon, v_tm_wday; - Time64_T v_tm_tday; - int leap; - Time64_T m; - Time64_T time = *in_time; - Year year = 70; - int cycles = 0; - - assert(p != NULL); - -#ifdef USE_SYSTEM_GMTIME - /* Use the system gmtime() if time_t is small enough */ - if( SHOULD_USE_SYSTEM_GMTIME(*in_time) ) { - time_t safe_time = (time_t)*in_time; - struct tm safe_date; - GMTIME_R(&safe_time, &safe_date); - - pymongo_copy_tm_to_TM64(&safe_date, p); - assert(check_tm(p)); - - return p; - } -#endif - -#ifdef HAS_TM_TM_GMTOFF - p->tm_gmtoff = 0; -#endif - p->tm_isdst = 0; - -#ifdef HAS_TM_TM_ZONE - p->tm_zone = "UTC"; -#endif - - v_tm_sec = (int)(time % 60); - time /= 60; - v_tm_min = (int)(time % 60); - time /= 60; - v_tm_hour = (int)(time % 24); - time /= 24; - v_tm_tday = time; - - _TIME64_WRAP (v_tm_sec, v_tm_min, 60); - _TIME64_WRAP (v_tm_min, v_tm_hour, 60); - _TIME64_WRAP (v_tm_hour, v_tm_tday, 24); - - v_tm_wday = (int)((v_tm_tday + 4) % 7); - if (v_tm_wday < 0) - v_tm_wday += 7; - m = v_tm_tday; - - if (m >= CHEAT_DAYS) { - year = CHEAT_YEARS; - m -= CHEAT_DAYS; - } - - if (m >= 0) { - /* Gregorian cycles, this is huge optimization for distant times */ - cycles = (int)(m / (Time64_T) days_in_gregorian_cycle); - if( cycles ) { - m -= (cycles * (Time64_T) days_in_gregorian_cycle); - year += (cycles * years_in_gregorian_cycle); - } - - /* Years */ - leap = IS_LEAP (year); - while (m >= (Time64_T) length_of_year[leap]) { - m -= (Time64_T) length_of_year[leap]; - year++; - leap = IS_LEAP (year); - } - - /* Months */ - v_tm_mon = 0; - while (m >= (Time64_T) days_in_month[leap][v_tm_mon]) { - m -= (Time64_T) days_in_month[leap][v_tm_mon]; - v_tm_mon++; - } - } else { - year--; - - /* Gregorian cycles 
*/ - cycles = (int)((m / (Time64_T) days_in_gregorian_cycle) + 1); - if( cycles ) { - m -= (cycles * (Time64_T) days_in_gregorian_cycle); - year += (cycles * years_in_gregorian_cycle); - } - - /* Years */ - leap = IS_LEAP (year); - while (m < (Time64_T) -length_of_year[leap]) { - m += (Time64_T) length_of_year[leap]; - year--; - leap = IS_LEAP (year); - } - - /* Months */ - v_tm_mon = 11; - while (m < (Time64_T) -days_in_month[leap][v_tm_mon]) { - m += (Time64_T) days_in_month[leap][v_tm_mon]; - v_tm_mon--; - } - m += (Time64_T) days_in_month[leap][v_tm_mon]; - } - - p->tm_year = (int)year; - if( p->tm_year != year ) { -#ifdef EOVERFLOW - errno = EOVERFLOW; -#endif - return NULL; - } - - /* At this point m is less than a year so casting to an int is safe */ - p->tm_mday = (int) m + 1; - p->tm_yday = julian_days_by_month[leap][v_tm_mon] + (int)m; - p->tm_sec = v_tm_sec; - p->tm_min = v_tm_min; - p->tm_hour = v_tm_hour; - p->tm_mon = v_tm_mon; - p->tm_wday = v_tm_wday; - - assert(check_tm(p)); - - return p; -} - - -struct TM *cbson_localtime64_r (const Time64_T *time, struct TM *local_tm) -{ - time_t safe_time; - struct tm safe_date; - struct TM gm_tm; - Year orig_year; - int month_diff; - - assert(local_tm != NULL); - -#ifdef USE_SYSTEM_LOCALTIME - /* Use the system localtime() if time_t is small enough */ - if( SHOULD_USE_SYSTEM_LOCALTIME(*time) ) { - safe_time = (time_t)*time; - - TIME64_TRACE1("Using system localtime for %lld\n", *time); - - LOCALTIME_R(&safe_time, &safe_date); - - pymongo_copy_tm_to_TM64(&safe_date, local_tm); - assert(check_tm(local_tm)); - - return local_tm; - } -#endif - - if( cbson_gmtime64_r(time, &gm_tm) == NULL ) { - TIME64_TRACE1("cbson_gmtime64_r returned null for %lld\n", *time); - return NULL; - } - - orig_year = gm_tm.tm_year; - - if (gm_tm.tm_year > (2037 - 1900) || - gm_tm.tm_year < (1970 - 1900) - ) - { - TIME64_TRACE1("Mapping tm_year %lld to safe_year\n", (Year)gm_tm.tm_year); - gm_tm.tm_year = safe_year((Year)(gm_tm.tm_year + 1900)) - 1900; - } - - safe_time = (time_t)cbson_timegm64(&gm_tm); - if( LOCALTIME_R(&safe_time, &safe_date) == NULL ) { - TIME64_TRACE1("localtime_r(%d) returned NULL\n", (int)safe_time); - return NULL; - } - - pymongo_copy_tm_to_TM64(&safe_date, local_tm); - - local_tm->tm_year = (int)orig_year; - if( local_tm->tm_year != orig_year ) { - TIME64_TRACE2("tm_year overflow: tm_year %lld, orig_year %lld\n", - (Year)local_tm->tm_year, (Year)orig_year); - -#ifdef EOVERFLOW - errno = EOVERFLOW; -#endif - return NULL; - } - - - month_diff = local_tm->tm_mon - gm_tm.tm_mon; - - /* When localtime is Dec 31st previous year and - gmtime is Jan 1st next year. - */ - if( month_diff == 11 ) { - local_tm->tm_year--; - } - - /* When localtime is Jan 1st, next year and - gmtime is Dec 31st, previous year. - */ - if( month_diff == -11 ) { - local_tm->tm_year++; - } - - /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st - in a non-leap xx00. There is one point in the cycle - we can't account for which the safe xx00 year is a leap - year. So we need to correct for Dec 31st coming out as - the 366th day of the year. 
- */ - if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 ) - local_tm->tm_yday--; - - assert(check_tm(local_tm)); - - return local_tm; -} - - -int cbson_valid_tm_wday( const struct TM* date ) { - if( 0 <= date->tm_wday && date->tm_wday <= 6 ) - return 1; - else - return 0; -} - -int cbson_valid_tm_mon( const struct TM* date ) { - if( 0 <= date->tm_mon && date->tm_mon <= 11 ) - return 1; - else - return 0; -} - - -/* Non-thread safe versions of the above */ -struct TM *cbson_localtime64(const Time64_T *time) { -#ifdef _MSC_VER - _tzset(); -#else - tzset(); -#endif - return cbson_localtime64_r(time, &Static_Return_Date); -} - -struct TM *cbson_gmtime64(const Time64_T *time) { - return cbson_gmtime64_r(time, &Static_Return_Date); -} diff --git a/spaces/jone/Music_Source_Separation/scripts/4_train/vctk-musdb18/train.sh b/spaces/jone/Music_Source_Separation/scripts/4_train/vctk-musdb18/train.sh deleted file mode 100644 index e64648c63f465981aa5fdea48a983ba78fe22259..0000000000000000000000000000000000000000 --- a/spaces/jone/Music_Source_Separation/scripts/4_train/vctk-musdb18/train.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -WORKSPACE=${1:-"./workspaces/bytesep"} # The first argument is workspace directory. - -echo "WORKSPACE=${WORKSPACE}" - -# Users can modify the following config file. -TRAIN_CONFIG_YAML="scripts/4_train/vctk-musdb18/configs/speech-accompaniment,unet.yaml" - -# Train & evaluate & save checkpoints. -CUDA_VISIBLE_DEVICES=0 python3 bytesep/train.py train \ - --workspace=$WORKSPACE \ - --gpus=1 \ - --config_yaml=$TRAIN_CONFIG_YAML \ No newline at end of file diff --git a/spaces/jpfearnworks/ai_agents/modules/vector_stores/embedding/__init__.py b/spaces/jpfearnworks/ai_agents/modules/vector_stores/embedding/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/jspr/tweet-ab/app.py b/spaces/jspr/tweet-ab/app.py deleted file mode 100644 index 5938d3f33ba20608369f21ddb429de63b49dac0a..0000000000000000000000000000000000000000 --- a/spaces/jspr/tweet-ab/app.py +++ /dev/null @@ -1,27 +0,0 @@ -import gradio as gr -import autokeras as ak -import numpy as np -from tensorflow.keras.models import load_model - -loaded_model = load_model("text_model", custom_objects=ak.CUSTOM_OBJECTS) - -def tweet_tester(tweet1, tweet2): - pred1 = loaded_model.predict(np.array([[tweet1]]))[0][0] - pred2 = loaded_model.predict(np.array([[tweet2]]))[0][0] - print(pred1, pred2) - diff_pct = (pred1 - pred2) / pred1 * 100 - # truncate diff_pct to 2 decimal places - diff_pct = round(diff_pct, 3) - if diff_pct > 0: - return f"tweet2 is {diff_pct}% better than tweet1" - else: - return f"tweet2 is {abs(diff_pct)}% worse than tweet1" - -interface = gr.Interface( - title="Tweet A/B Test", - description="Enter the text of two tweets you'd like to A/B test. 
The output number represents the percent difference in expected likes between the two tweets.", - fn=tweet_tester, - inputs=["text", "text"], - outputs=["text"] -) -interface.launch() diff --git a/spaces/kaicheng/ChatGPT_ad/ChuanhuChatbot.py b/spaces/kaicheng/ChatGPT_ad/ChuanhuChatbot.py deleted file mode 100644 index 890e5c7ec70f26a0452ded3e33cd56f488819932..0000000000000000000000000000000000000000 --- a/spaces/kaicheng/ChatGPT_ad/ChuanhuChatbot.py +++ /dev/null @@ -1,473 +0,0 @@ -# -*- coding:utf-8 -*- -import os -import logging -import sys - -import gradio as gr - -from modules import config -from modules.config import * -from modules.utils import * -from modules.presets import * -from modules.overwrites import * -from modules.models.models import get_model - -logging.getLogger("httpx").setLevel(logging.WARNING) - -gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages -gr.Chatbot.postprocess = postprocess - -with open("assets/custom.css", "r", encoding="utf-8") as f: - customCSS = f.read() - -def create_new_model(): - return get_model(model_name = MODELS[DEFAULT_MODEL], access_key = my_api_key)[0] - -with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo: - user_name = gr.State("") - promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - user_question = gr.State("") - assert type(my_api_key)==str - user_api_key = gr.State(my_api_key) - current_model = gr.State(create_new_model) - - topic = gr.State(i18n("未命名对话历史记录")) - - with gr.Row(): - gr.HTML(CHUANHU_TITLE, elem_id="app_title") - status_display = gr.Markdown(get_geoip(), elem_id="status_display") - with gr.Row(elem_id="float_display"): - user_info = gr.Markdown(value="getting user info...", elem_id="user_info") - - with gr.Row().style(equal_height=True): - with gr.Column(scale=5): - with gr.Row(): - chatbot = gr.Chatbot(label="Chuanhu Chat", elem_id="chuanhu_chatbot").style(height="100%") - with gr.Row(): - with gr.Column(min_width=225, scale=12): - user_input = gr.Textbox( - elem_id="user_input_tb", - show_label=False, placeholder=i18n("在这里输入") - ).style(container=False) - with gr.Column(min_width=42, scale=1): - submitBtn = gr.Button(value="", variant="primary", elem_id="submit_btn") - cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel_btn") - with gr.Row(): - emptyBtn = gr.Button( - i18n("🧹 新的对话"), elem_id="empty_btn" - ) - retryBtn = gr.Button(i18n("🔄 重新生成")) - delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话")) - delLastBtn = gr.Button(i18n("🗑️ 删除最新对话")) - with gr.Row(visible=False) as like_dislike_area: - with gr.Column(min_width=20, scale=1): - likeBtn = gr.Button(i18n("👍")) - with gr.Column(min_width=20, scale=1): - dislikeBtn = gr.Button(i18n("👎")) - - with gr.Column(): - with gr.Column(min_width=50, scale=1): - with gr.Tab(label=i18n("模型")): - keyTxt = gr.Textbox( - show_label=True, - placeholder=f"Your API-key...", - value=hide_middle_chars(user_api_key.value), - type="password", - visible=not HIDE_MY_KEY, - label="API-Key", - ) - if multi_api_key: - usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage_display", elem_classes="insert_block") - else: - usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage_display", elem_classes="insert_block") - model_select_dropdown = gr.Dropdown( - label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL], interactive=True - ) - lora_select_dropdown = gr.Dropdown( - label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, 
visible=False - ) - with gr.Row(): - single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False) - use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False) - language_select_dropdown = gr.Dropdown( - label=i18n("选择回复语言(针对搜索&索引功能)"), - choices=REPLY_LANGUAGES, - multiselect=False, - value=REPLY_LANGUAGES[0], - ) - index_files = gr.Files(label=i18n("上传"), type="file") - two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False)) - summarize_btn = gr.Button(i18n("总结")) - # TODO: 公式ocr - # formula_ocr = gr.Checkbox(label=i18n("识别公式"), value=advance_docs["pdf"].get("formula_ocr", False)) - - with gr.Tab(label="Prompt"): - systemPromptTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入System Prompt..."), - label="System prompt", - value=INITIAL_SYSTEM_PROMPT, - lines=10, - ).style(container=False) - with gr.Accordion(label=i18n("加载Prompt模板"), open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown( - label=i18n("选择Prompt模板集合文件"), - choices=get_template_names(plain=True), - multiselect=False, - value=get_template_names(plain=True)[0], - ).style(container=False) - with gr.Column(scale=1): - templateRefreshBtn = gr.Button(i18n("🔄 刷新")) - with gr.Row(): - with gr.Column(): - templateSelectDropdown = gr.Dropdown( - label=i18n("从Prompt模板中加载"), - choices=load_template( - get_template_names(plain=True)[0], mode=1 - ), - multiselect=False, - ).style(container=False) - - with gr.Tab(label=i18n("保存/加载")): - with gr.Accordion(label=i18n("保存/加载对话历史记录"), open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown( - label=i18n("从列表中加载对话"), - choices=get_history_names(plain=True), - multiselect=False - ) - with gr.Column(scale=1): - historyRefreshBtn = gr.Button(i18n("🔄 刷新")) - with gr.Row(): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, - placeholder=i18n("设置文件名: 默认为.json,可选为.md"), - label=i18n("设置保存文件名"), - value=i18n("对话历史记录"), - ).style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button(i18n("💾 保存对话")) - exportMarkdownBtn = gr.Button(i18n("📝 导出为Markdown")) - gr.Markdown(i18n("默认保存于history文件夹")) - with gr.Row(): - with gr.Column(): - downloadFile = gr.File(interactive=True) - - with gr.Tab(label=i18n("高级")): - gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置")) - gr.HTML(get_html("appearance_switcher.html").format(label=i18n("切换亮暗色主题")), elem_classes="insert_block") - use_streaming_checkbox = gr.Checkbox( - label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION - ) - with gr.Accordion(i18n("参数"), open=False): - temperature_slider = gr.Slider( - minimum=-0, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label="temperature", - ) - top_p_slider = gr.Slider( - minimum=-0, - maximum=1.0, - value=1.0, - step=0.05, - interactive=True, - label="top-p", - ) - n_choices_slider = gr.Slider( - minimum=1, - maximum=10, - value=1, - step=1, - interactive=True, - label="n choices", - ) - stop_sequence_txt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入停止符,用英文逗号隔开..."), - label="stop", - value="", - lines=1, - ) - max_context_length_slider = gr.Slider( - minimum=1, - maximum=32768, - value=2000, - step=1, - interactive=True, - label="max context", - ) - max_generation_slider = gr.Slider( - minimum=1, - maximum=32768, - value=1000, - step=1, - interactive=True, - label="max generations", - ) - presence_penalty_slider = gr.Slider( - minimum=-2.0, - 
maximum=2.0, - value=0.0, - step=0.01, - interactive=True, - label="presence penalty", - ) - frequency_penalty_slider = gr.Slider( - minimum=-2.0, - maximum=2.0, - value=0.0, - step=0.01, - interactive=True, - label="frequency penalty", - ) - logit_bias_txt = gr.Textbox( - show_label=True, - placeholder=f"word:likelihood", - label="logit bias", - value="", - lines=1, - ) - user_identifier_txt = gr.Textbox( - show_label=True, - placeholder=i18n("用于定位滥用行为"), - label=i18n("用户名"), - value=user_name.value, - lines=1, - ) - - with gr.Accordion(i18n("网络设置"), open=False, visible=False): - # 优先展示自定义的api_host - apihostTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入API-Host..."), - label="API-Host", - value=config.api_host or shared.API_HOST, - lines=1, - ) - changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址")) - proxyTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入代理地址..."), - label=i18n("代理地址(示例:http://127.0.0.1:10809)"), - value="", - lines=2, - ) - changeProxyBtn = gr.Button(i18n("🔄 设置代理地址")) - default_btn = gr.Button(i18n("🔙 恢复默认设置")) - - gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description") - gr.HTML(get_html("footer.html").format(versions=versions_html()), elem_id="footer") - - # https://github.com/gradio-app/gradio/pull/3296 - def create_greeting(request: gr.Request): - if hasattr(request, "username") and request.username: # is not None or is not "" - logging.info(f"Get User Name: {request.username}") - user_info, user_name = gr.Markdown.update(value=f"User: {request.username}"), request.username - else: - user_info, user_name = gr.Markdown.update(value=f"", visible=False), "" - current_model = get_model(model_name = MODELS[DEFAULT_MODEL], access_key = my_api_key)[0] - current_model.set_user_identifier(user_name) - chatbot = gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]) - return user_info, user_name, current_model, toggle_like_btn_visibility(DEFAULT_MODEL), *current_model.auto_load(), get_history_names(False, user_name), chatbot - demo.load(create_greeting, inputs=None, outputs=[user_info, user_name, current_model, like_dislike_area, systemPromptTxt, chatbot, historyFileSelectDropdown, chatbot], api_name="load") - chatgpt_predict_args = dict( - fn=predict, - inputs=[ - current_model, - user_question, - chatbot, - use_streaming_checkbox, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - outputs=[chatbot, status_display], - show_progress=True, - ) - - start_outputing_args = dict( - fn=start_outputing, - inputs=[], - outputs=[submitBtn, cancelBtn], - show_progress=True, - ) - - end_outputing_args = dict( - fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn] - ) - - reset_textbox_args = dict( - fn=reset_textbox, inputs=[], outputs=[user_input] - ) - - transfer_input_args = dict( - fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn], show_progress=True - ) - - get_usage_args = dict( - fn=billing_info, inputs=[current_model], outputs=[usageTxt], show_progress=False - ) - - load_history_from_file_args = dict( - fn=load_chat_history, - inputs=[current_model, historyFileSelectDropdown, user_name], - outputs=[saveFileName, systemPromptTxt, chatbot] - ) - - - # Chatbot - cancelBtn.click(interrupt, [current_model], []) - - user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - user_input.submit(**get_usage_args) - - submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args, api_name="predict").then(**end_outputing_args) - 
submitBtn.click(**get_usage_args) - - index_files.change(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [index_files, chatbot, status_display]) - summarize_btn.click(handle_summarize_index, [current_model, index_files, chatbot, language_select_dropdown], [chatbot, status_display]) - - emptyBtn.click( - reset, - inputs=[current_model], - outputs=[chatbot, status_display], - show_progress=True, - ) - - retryBtn.click(**start_outputing_args).then( - retry, - [ - current_model, - chatbot, - use_streaming_checkbox, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - [chatbot, status_display], - show_progress=True, - ).then(**end_outputing_args) - retryBtn.click(**get_usage_args) - - delFirstBtn.click( - delete_first_conversation, - [current_model], - [status_display], - ) - - delLastBtn.click( - delete_last_conversation, - [current_model, chatbot], - [chatbot, status_display], - show_progress=False - ) - - likeBtn.click( - like, - [current_model], - [status_display], - show_progress=False - ) - - dislikeBtn.click( - dislike, - [current_model], - [status_display], - show_progress=False - ) - - two_column.change(update_doc_config, [two_column], None) - - # LLM Models - keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display], api_name="set_key").then(**get_usage_args) - keyTxt.submit(**get_usage_args) - single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None) - model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot, lora_select_dropdown], show_progress=True, api_name="get_model") - model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False) - lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot], show_progress=True) - - # Template - systemPromptTxt.change(set_system_prompt, [current_model, systemPromptTxt], None) - templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown]) - templateFileSelectDropdown.change( - load_template, - [templateFileSelectDropdown], - [promptTemplates, templateSelectDropdown], - show_progress=True, - ) - templateSelectDropdown.change( - get_template_content, - [promptTemplates, templateSelectDropdown, systemPromptTxt], - [systemPromptTxt], - show_progress=True, - ) - - # S&L - saveHistoryBtn.click( - save_chat_history, - [current_model, saveFileName, chatbot, user_name], - downloadFile, - show_progress=True, - ) - saveHistoryBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - exportMarkdownBtn.click( - export_markdown, - [current_model, saveFileName, chatbot, user_name], - downloadFile, - show_progress=True, - ) - historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - historyFileSelectDropdown.change(**load_history_from_file_args) - downloadFile.change(upload_chat_history, [current_model, downloadFile, user_name], [saveFileName, systemPromptTxt, chatbot]) - - # Advanced - max_context_length_slider.change(set_token_upper_limit, [current_model, max_context_length_slider], None) - temperature_slider.change(set_temperature, [current_model, temperature_slider], None) - top_p_slider.change(set_top_p, 
[current_model, top_p_slider], None) - n_choices_slider.change(set_n_choices, [current_model, n_choices_slider], None) - stop_sequence_txt.change(set_stop_sequence, [current_model, stop_sequence_txt], None) - max_generation_slider.change(set_max_tokens, [current_model, max_generation_slider], None) - presence_penalty_slider.change(set_presence_penalty, [current_model, presence_penalty_slider], None) - frequency_penalty_slider.change(set_frequency_penalty, [current_model, frequency_penalty_slider], None) - logit_bias_txt.change(set_logit_bias, [current_model, logit_bias_txt], None) - user_identifier_txt.change(set_user_identifier, [current_model, user_identifier_txt], None) - - default_btn.click( - reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True - ) - changeAPIURLBtn.click( - change_api_host, - [apihostTxt], - [status_display], - show_progress=True, - ) - changeProxyBtn.click( - change_proxy, - [proxyTxt], - [status_display], - show_progress=True, - ) - -logging.info( - colorama.Back.GREEN - + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" - + colorama.Style.RESET_ALL -) -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = i18n("川虎Chat 🚀") - -if __name__ == "__main__": - reload_javascript() - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - blocked_paths=["config.json"], - favicon_path="./assets/favicon.ico" - ) diff --git a/spaces/kangvcar/RealChar/entrypoint.sh b/spaces/kangvcar/RealChar/entrypoint.sh deleted file mode 100644 index debf994902e1ebfa9c030107a29eceafceb74b44..0000000000000000000000000000000000000000 --- a/spaces/kangvcar/RealChar/entrypoint.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -set -e -alembic upgrade head -uvicorn realtime_ai_character.main:app --host 0.0.0.0 --port 7860 diff --git a/spaces/keneonyeachonam/AutoML_UsingStreamlit_Plotly_020923/Prompts.md b/spaces/keneonyeachonam/AutoML_UsingStreamlit_Plotly_020923/Prompts.md deleted file mode 100644 index ec0817592c1a131230048d4f6e9994f6d2c9863d..0000000000000000000000000000000000000000 --- a/spaces/keneonyeachonam/AutoML_UsingStreamlit_Plotly_020923/Prompts.md +++ /dev/null @@ -1,115 +0,0 @@ - - -write an automl streamlit python tensorflow keras application with mechanics that takes a CSV dataset, performs pandas profiling and generates the code to do classification -Here is an example of an Automated Machine Learning (AutoML) Streamlit application that takes a CSV dataset, performs pandas profiling, and generates the code to do classification using TensorFlow and Keras: - -python -Copy code -import streamlit as st -import pandas as pd -import pandas_profiling -from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Dense -from sklearn.model_selection import train_test_split - -st.set_page_config(page_title="AutoML Streamlit App", page_icon=":robot:", layout="wide") - -st.title("AutoML Streamlit App") - -# Upload a CSV dataset -uploaded_file = st.file_uploader("Upload your dataset", type=["csv"]) -if uploaded_file is not None: - # Load the dataset and display the first 5 rows - df = pd.read_csv(uploaded_file) - st.dataframe(df.head()) - - # Perform pandas profiling and display the report - profile = pandas_profiling.ProfileReport(df) - st.write("Pandas Profiling Report:", profile) - - # Split the data into features and target variables - X = df.drop(df.columns[-1], axis=1) - y = df[df.columns[-1]] - - # Split the data into training and testing sets - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) - - # Define a sequential model with 
TensorFlow and Keras - model = Sequential() - model.add(Dense(16, input_dim=X_train.shape[1], activation="relu")) - model.add(Dense(1, activation="sigmoid")) - - # Compile the model - model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) - - # Train the model - model.fit(X_train, y_train, epochs=10, batch_size=32) - - # Evaluate the model on the test set - test_loss, test_accuracy = model.evaluate(X_test, y_test) - st.write("Test Accuracy:", test_accuracy) - - # Generate the code to do classification - st.write("Generated Code:") - st.code(""" -import pandas as pd -import tensorflow as tf -from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Dense - -# Load the dataset -df = pd.read_csv("dataset.csv") - -# Split the data into features and target variables -X = df.drop(df.columns[-1], axis=1) -y = df[df.columns[-1]] - -# Define a sequential model with TensorFlow and Keras -model = Sequential() -model.add(Dense(16, input_dim=X.shape[1], activation="relu")) -model.add(Dense(1, activation="sigmoid")) - -# Compile the model -model.compile(loss="binary_crossentropy", optimizer=" - - - -Aaron Wacker -continue writing the program above but simplify it and automatically generate a plotly visualization using treemap and sunburst plot from the dataset dynamically by interpreting the data types inside the CSV file -Here is a simplified version of the AutoML Streamlit application that automatically generates a Plotly visualization using treemap and sunburst plots from the CSV dataset dynamically by interpreting the data types inside the file: - -python -Copy code -import streamlit as st -import pandas as pd -import plotly.express as px - -st.set_page_config(page_title="AutoML Streamlit App", page_icon=":robot:", layout="wide") - -st.title("AutoML Streamlit App") - -# Upload a CSV dataset -uploaded_file = st.file_uploader("Upload your dataset", type=["csv"]) -if uploaded_file is not None: - # Load the dataset and display the first 5 rows - df = pd.read_csv(uploaded_file) - st.dataframe(df.head()) - - # Generate a treemap or sunburst plot based on data types - numerical_cols = df.select_dtypes(include=["float", "int"]).columns - categorical_cols = df.select_dtypes(include=["object"]).columns - - if len(numerical_cols) >= 2: - fig = px.scatter_matrix(df, dimensions=numerical_cols) - st.plotly_chart(fig) - elif len(categorical_cols) >= 2: - fig = px.treemap(df, path=categorical_cols) - st.plotly_chart(fig) - else: - fig = px.sunburst(df, path=categorical_cols + numerical_cols) - st.plotly_chart(fig) -In this simplified version, we use Plotly Express to generate a scatter matrix plot for numerical data, a treemap plot for categorical data, and a sunburst plot for both numerical and categorical data. The type of plot generated is based on the number of numerical and categorical columns present in the dataset. 
- - - -Regenerate response diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/utils/plot.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/utils/plot.py deleted file mode 100644 index ccc588e5c01ca550b69c385aeb3fd139c59fb88a..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/utils/plot.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding: utf-8 - -import os -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap -from prettytable import PrettyTable -from sklearn.metrics import roc_curve, auc - -image_path = "/data/anxiang/IJB_release/IJBC" -files = [ - "./ms1mv3_arcface_r100/ms1mv3_arcface_r100/ijbc.npy" -] - - -def read_template_pair_list(path): - pairs = pd.read_csv(path, sep=' ', header=None).values - t1 = pairs[:, 0].astype(np.int) - t2 = pairs[:, 1].astype(np.int) - label = pairs[:, 2].astype(np.int) - return t1, t2, label - - -p1, p2, label = read_template_pair_list( - os.path.join('%s/meta' % image_path, - '%s_template_pair_label.txt' % 'ijbc')) - -methods = [] -scores = [] -for file in files: - methods.append(file.split('/')[-2]) - scores.append(np.load(file)) - -methods = np.array(methods) -scores = dict(zip(methods, scores)) -colours = dict( - zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2'))) -x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1] -tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels]) -fig = plt.figure() -for method in methods: - fpr, tpr, _ = roc_curve(label, scores[method]) - roc_auc = auc(fpr, tpr) - fpr = np.flipud(fpr) - tpr = np.flipud(tpr) # select largest tpr at same fpr - plt.plot(fpr, - tpr, - color=colours[method], - lw=1, - label=('[%s (AUC = %0.4f %%)]' % - (method.split('-')[-1], roc_auc * 100))) - tpr_fpr_row = [] - tpr_fpr_row.append("%s-%s" % (method, "IJBC")) - for fpr_iter in np.arange(len(x_labels)): - _, min_index = min( - list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr))))) - tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100)) - tpr_fpr_table.add_row(tpr_fpr_row) -plt.xlim([10 ** -6, 0.1]) -plt.ylim([0.3, 1.0]) -plt.grid(linestyle='--', linewidth=1) -plt.xticks(x_labels) -plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True)) -plt.xscale('log') -plt.xlabel('False Positive Rate') -plt.ylabel('True Positive Rate') -plt.title('ROC on IJB') -plt.legend(loc="lower right") -print(tpr_fpr_table) diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/roberta/README.custom_classification.md b/spaces/koajoel/PolyFormer/fairseq/examples/roberta/README.custom_classification.md deleted file mode 100644 index 7254bb7d178760ef5b847901bbcac3711af33ca2..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/roberta/README.custom_classification.md +++ /dev/null @@ -1,168 +0,0 @@ -# Finetuning RoBERTa on a custom classification task - -This example shows how to finetune RoBERTa on the IMDB dataset, but should illustrate the process for most classification tasks. - -### 1) Get the data - -```bash -wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz -tar zxvf aclImdb_v1.tar.gz -``` - - -### 2) Format data - -`IMDB` data has one data-sample in each file, below python code-snippet converts it one file for train and valid each for ease of processing. 
-```python -import argparse -import os -import random -from glob import glob - -random.seed(0) - -def main(args): - for split in ['train', 'test']: - samples = [] - for class_label in ['pos', 'neg']: - fnames = glob(os.path.join(args.datadir, split, class_label) + '/*.txt') - for fname in fnames: - with open(fname) as fin: - line = fin.readline() - samples.append((line, 1 if class_label == 'pos' else 0)) - random.shuffle(samples) - out_fname = 'train' if split == 'train' else 'dev' - f1 = open(os.path.join(args.datadir, out_fname + '.input0'), 'w') - f2 = open(os.path.join(args.datadir, out_fname + '.label'), 'w') - for sample in samples: - f1.write(sample[0] + '\n') - f2.write(str(sample[1]) + '\n') - f1.close() - f2.close() - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--datadir', default='aclImdb') - args = parser.parse_args() - main(args) -``` - - -### 3) BPE encode - -Run `multiprocessing_bpe_encoder`, you can also do this in previous step for each sample but that might be slower. -```bash -# Download encoder.json and vocab.bpe -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' - -for SPLIT in train dev; do - python -m examples.roberta.multiprocessing_bpe_encoder \ - --encoder-json encoder.json \ - --vocab-bpe vocab.bpe \ - --inputs "aclImdb/$SPLIT.input0" \ - --outputs "aclImdb/$SPLIT.input0.bpe" \ - --workers 60 \ - --keep-empty -done -``` - - -### 4) Preprocess data - -```bash -# Download fairseq dictionary. -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt' - -fairseq-preprocess \ - --only-source \ - --trainpref "aclImdb/train.input0.bpe" \ - --validpref "aclImdb/dev.input0.bpe" \ - --destdir "IMDB-bin/input0" \ - --workers 60 \ - --srcdict dict.txt - -fairseq-preprocess \ - --only-source \ - --trainpref "aclImdb/train.label" \ - --validpref "aclImdb/dev.label" \ - --destdir "IMDB-bin/label" \ - --workers 60 - -``` - - -### 5) Run training - -```bash -TOTAL_NUM_UPDATES=7812 # 10 epochs through IMDB for bsz 32 -WARMUP_UPDATES=469 # 6 percent of the number of updates -LR=1e-05 # Peak LR for polynomial LR scheduler. -HEAD_NAME=imdb_head # Custom name for the classification head. -NUM_CLASSES=2 # Number of classes for the classification task. -MAX_SENTENCES=8 # Batch size. -ROBERTA_PATH=/path/to/roberta.large/model.pt - -CUDA_VISIBLE_DEVICES=0 fairseq-train IMDB-bin/ \ - --restore-file $ROBERTA_PATH \ - --max-positions 512 \ - --batch-size $MAX_SENTENCES \ - --max-tokens 4400 \ - --task sentence_prediction \ - --reset-optimizer --reset-dataloader --reset-meters \ - --required-batch-size-multiple 1 \ - --init-token 0 --separator-token 2 \ - --arch roberta_large \ - --criterion sentence_prediction \ - --classification-head-name $HEAD_NAME \ - --num-classes $NUM_CLASSES \ - --dropout 0.1 --attention-dropout 0.1 \ - --weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \ - --clip-norm 0.0 \ - --lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \ - --fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \ - --max-epoch 10 \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \ - --shorten-method "truncate" \ - --find-unused-parameters \ - --update-freq 4 -``` - -The above command will finetune RoBERTa-large with an effective batch-size of 32 -sentences (`--batch-size=8 --update-freq=4`). 
The expected -`best-validation-accuracy` after 10 epochs is ~96.5%. - -If you run out of GPU memory, try decreasing `--batch-size` and increase -`--update-freq` to compensate. - - -### 6) Load model using hub interface - -Now we can load the trained model checkpoint using the RoBERTa hub interface. - -Assuming your checkpoints are stored in `checkpoints/`: -```python -from fairseq.models.roberta import RobertaModel -roberta = RobertaModel.from_pretrained( - 'checkpoints', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='IMDB-bin' -) -roberta.eval() # disable dropout -``` - -Finally you can make predictions using the `imdb_head` (or whatever you set -`--classification-head-name` to during training): -```python -label_fn = lambda label: roberta.task.label_dictionary.string( - [label + roberta.task.label_dictionary.nspecial] -) - -tokens = roberta.encode('Best movie this year') -pred = label_fn(roberta.predict('imdb_head', tokens).argmax().item()) -assert pred == '1' # positive - -tokens = roberta.encode('Worst movie ever') -pred = label_fn(roberta.predict('imdb_head', tokens).argmax().item()) -assert pred == '0' # negative -``` diff --git a/spaces/kukr3207/forex_demo/README.md b/spaces/kukr3207/forex_demo/README.md deleted file mode 100644 index f7cac6330ed94ba356bfcbb17cb493df7b686a9b..0000000000000000000000000000000000000000 --- a/spaces/kukr3207/forex_demo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Forex Demo -emoji: 💻 -colorFrom: purple -colorTo: gray -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kukuhtw/AutoGPT/autogpt/__init__.py b/spaces/kukuhtw/AutoGPT/autogpt/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/lanbogao/ytdlp-whisper/README.md b/spaces/lanbogao/ytdlp-whisper/README.md deleted file mode 100644 index b07fb376f24feb989fc9a729981ca203051701c2..0000000000000000000000000000000000000000 --- a/spaces/lanbogao/ytdlp-whisper/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Ytdlp Whisper -emoji: 🐢 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- diff --git a/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/Dockerfile b/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/Dockerfile deleted file mode 100644 index 93fef62cc6b50f3b9989f79327cc32cbf88e5b9f..0000000000000000000000000000000000000000 --- a/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04 - -ARG DEBIAN_FRONTEND=noninteractive - -ENV PYTHONUNBUFFERED=1 - -RUN apt-get update && apt-get install --no-install-recommends -y \ - build-essential \ - python3.9 \ - python3-pip \ - python3-dev \ - git \ - ffmpeg \ - google-perftools \ - && apt-get clean && rm -rf /var/lib/apt/lists/* - - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user -# Switch to the "user" user -USER user -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH \ - PYTHONPATH=$HOME/app \ - PYTHONUNBUFFERED=1 \ - SYSTEM=spaces - -RUN pip3 install --no-cache-dir --upgrade -r /code/requirements.txt - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# 
Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app - -ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc.so.4 -# CMD ["uvicorn", "app-img2img:app", "--host", "0.0.0.0", "--port", "7860"] -# CMD ["uvicorn", "app-txt2img:app", "--host", "0.0.0.0", "--port", "7860"] -CMD ["uvicorn", "app-controlnetlora:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/leave7/kazunaAI2.0/losses.py b/spaces/leave7/kazunaAI2.0/losses.py deleted file mode 100644 index 41f9be6980713a46824ae9ec5eb8fd7c515d89c5..0000000000000000000000000000000000000000 --- a/spaces/leave7/kazunaAI2.0/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - #print(logs_p) - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/libhost/tech/postcss.config.js b/spaces/libhost/tech/postcss.config.js deleted file mode 100644 index 33ad091d26d8a9dc95ebdf616e217d985ec215b8..0000000000000000000000000000000000000000 --- a/spaces/libhost/tech/postcss.config.js +++ /dev/null @@ -1,6 +0,0 @@ -module.exports = { - plugins: { - tailwindcss: {}, - autoprefixer: {}, - }, -} diff --git a/spaces/library-samples/image-captioning-with-blip/style.css b/spaces/library-samples/image-captioning-with-blip/style.css deleted file mode 100644 index 859cfd5467349b9a0350f65164d9e0fb656e878f..0000000000000000000000000000000000000000 --- a/spaces/library-samples/image-captioning-with-blip/style.css +++ /dev/null @@ -1,16 +0,0 @@ -h1 { - text-align: center; -} - -#duplicate-button { - margin: auto; - color: #fff; - background: #1565c0; - border-radius: 100vh; -} - -.contain { - width: 730px; - margin: auto; - padding-top: 1.5rem; -} diff --git a/spaces/limobaidandan2515/ChatGPT4/app.py b/spaces/limobaidandan2515/ChatGPT4/app.py deleted file mode 100644 index 632f0ee79c2a44a19c299e5965101cad17293e69..0000000000000000000000000000000000000000 --- a/spaces/limobaidandan2515/ChatGPT4/app.py +++ /dev/null @@ -1,191 +0,0 @@ -import gradio as gr -import os -import json -import requests - -#Streaming endpoint -API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream" - -#Inferenec function -def predict(openai_gpt4_key, system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]): - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_gpt4_key}" #Users will provide their own OPENAI_API_KEY - } - print(f"system message is ^^ {system_msg}") - if system_msg.strip() == '': - initial_message = [{"role": "user", "content": f"{inputs}"},] - multi_turn_message = [] - else: - initial_message= [{"role": "system", "content": system_msg}, - {"role": "user", "content": f"{inputs}"},] - multi_turn_message = [{"role": "system", "content": system_msg},] - - if chat_counter == 0 : - payload = { - "model": "gpt-4", - "messages": initial_message , - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - print(f"chat_counter - {chat_counter}") - else: #if chat_counter != 0 : - messages=multi_turn_message # Of the type of - [{"role": "system", "content": system_msg},] - for data in chatbot: - user = {} - user["role"] = "user" - user["content"] = data[0] - assistant = {} - assistant["role"] = "assistant" - assistant["content"] = data[1] - messages.append(user) - messages.append(assistant) - temp = {} - temp["role"] = "user" - temp["content"] = inputs - messages.append(temp) - #messages - payload = { - "model": "gpt-4", - "messages": messages, # Of the type of [{"role": "user", "content": f"{inputs}"}], - "temperature" : temperature, #1.0, - "top_p": top_p, #1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0,} - - chat_counter+=1 - - history.append(inputs) - print(f"Logging : payload is - {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - print(f"Logging : response code - {response}") - token_counter = 0 - partial_words = "" - - counter=0 - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter+=1 - continue - # check 
whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list - token_counter+=1 - yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history} - -#Resetting to blank -def reset_textbox(): - return gr.update(value='') - -#to set a component as visible=False -def set_visible_false(): - return gr.update(visible=False) - -#to set a component as visible=True -def set_visible_true(): - return gr.update(visible=True) - -title = """

          🔥GPT4 using Chat-Completions API & 🚀Gradio-Streaming

          """ -#display message for themes feature -theme_addon_msg = """
🌟 This demo also introduces you to Gradio Themes. Discover more on the Gradio website using the Theming Guide🎨! You can develop a theme from scratch, modify an existing Gradio theme, and share your themes with the community by uploading them to the huggingface-hub easily using theme.push_to_hub().
          -""" - -#Using info to add additional information about System message in GPT4 -system_msg_info = """A conversation could begin with a system message to gently instruct the assistant. -System message helps set the behavior of the AI Assistant. For example, the assistant could be instructed with 'You are a helpful assistant.'""" - -#Modifying existing Gradio Theme -theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="green", - text_size=gr.themes.sizes.text_lg) - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - gr.HTML("""

🔥This Hugging Face Gradio demo gives you access to the GPT4 API with system messages. Please note that you will need an OpenAI API key for GPT4 access🙌

          """) - gr.HTML(theme_addon_msg) - gr.HTML('''
Duplicate the Space and run securely with your OpenAI API Key
          ''') - - with gr.Column(elem_id = "col_container"): - #Users need to provide their own GPT4 API key, it is no longer provided by Huggingface - with gr.Row(): - openai_gpt4_key = gr.Textbox(label="OpenAI GPT4 Key", value="", type="password", placeholder="sk..", info = "You have to provide your own GPT4 keys for this app to function properly",) - with gr.Accordion(label="System message:", open=False): - system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value="",placeholder="Type here..") - accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False) - - chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot") - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") - state = gr.State([]) - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button().style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #top_p, temperature - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - #Event handling - inputs.submit( predict, [openai_gpt4_key, system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click( predict, [openai_gpt4_key, system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - - inputs.submit(set_visible_false, [], [system_msg]) - b1.click(set_visible_false, [], [system_msg]) - inputs.submit(set_visible_true, [], [accordion_msg]) - b1.click(set_visible_true, [], [accordion_msg]) - - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - #Examples - with gr.Accordion(label="Examples for System message:", open=False): - gr.Examples( - examples = [["""You are an AI programming assistant. - - - Follow the user's requirements carefully and to the letter. - - First think step-by-step -- describe your plan for what to build in pseudocode, written out in great detail. - - Then output the code in a single code block. - - Minimize any other prose."""], ["""You are ComedianGPT who is a helpful assistant. 
You answer everything with a joke and witty replies."""], - ["You are ChefGPT, a helpful assistant who answers questions with culinary expertise and a pinch of humor."], - ["You are FitnessGuruGPT, a fitness expert who shares workout tips and motivation with a playful twist."], - ["You are SciFiGPT, an AI assistant who discusses science fiction topics with a blend of knowledge and wit."], - ["You are PhilosopherGPT, a thoughtful assistant who responds to inquiries with philosophical insights and a touch of humor."], - ["You are EcoWarriorGPT, a helpful assistant who shares environment-friendly advice with a lighthearted approach."], - ["You are MusicMaestroGPT, a knowledgeable AI who discusses music and its history with a mix of facts and playful banter."], - ["You are SportsFanGPT, an enthusiastic assistant who talks about sports and shares amusing anecdotes."], - ["You are TechWhizGPT, a tech-savvy AI who can help users troubleshoot issues and answer questions with a dash of humor."], - ["You are FashionistaGPT, an AI fashion expert who shares style advice and trends with a sprinkle of wit."], - ["You are ArtConnoisseurGPT, an AI assistant who discusses art and its history with a blend of knowledge and playful commentary."], - ["You are a helpful assistant that provides detailed and accurate information."], - ["You are an assistant that speaks like Shakespeare."], - ["You are a friendly assistant who uses casual language and humor."], - ["You are a financial advisor who gives expert advice on investments and budgeting."], - ["You are a health and fitness expert who provides advice on nutrition and exercise."], - ["You are a travel consultant who offers recommendations for destinations, accommodations, and attractions."], - ["You are a movie critic who shares insightful opinions on films and their themes."], - ["You are a history enthusiast who loves to discuss historical events and figures."], - ["You are a tech-savvy assistant who can help users troubleshoot issues and answer questions about gadgets and software."], - ["You are an AI poet who can compose creative and evocative poems on any given topic."],], - inputs = system_msg,) - -demo.queue(max_size=99, concurrency_count=20).launch(debug=True) \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Adguard Premium 6 Crack Download Full Free [PATCHED].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Adguard Premium 6 Crack Download Full Free [PATCHED].md deleted file mode 100644 index ab48fd354bc666f64fe19b4a94b7108d45dee731..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Adguard Premium 6 Crack Download Full Free [PATCHED].md +++ /dev/null @@ -1,6 +0,0 @@ -

          Adguard Premium 6 Crack Download Full FREE


          Download File ✏ ✏ ✏ https://bytlly.com/2uGvB2



          -
          - 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Inception Vol.3 (ElectraX Bank).md b/spaces/lincquiQcaudo/Top-20-Diffusion/Inception Vol.3 (ElectraX Bank).md deleted file mode 100644 index df69eebde3652d7f731ca7a178caec45869adfdf..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Inception Vol.3 (ElectraX Bank).md +++ /dev/null @@ -1,6 +0,0 @@ -

          Inception Vol.3 (ElectraX Bank)


          Download Zip > https://bytlly.com/2uGwLh



          -
          -3 #Inception D-march Vol. 5 #Kingsfall Dmarch Vol. ... III #Inception Dmarch Vol. ... 1 (ElectraX) Savage Electra Bank Volume 2 Savage Mode Savage XP Vol. 1 Savage XP Vol. 2 Savage XP Vol. 3 Savage XP Vol. 4 Savage XP Vol. 5 Savage XP Vol. 6 Savage XP Vol. 7 Savage XP Vol. 8 Savage XP Vol. 9 Savage XP Vol. 10 Savage XP Vol. 11 Savage XP Vol. 12 Savage XP Vol. 13 Savage XP Vol. 14 Savage XP Vol. 15 Savage XP Vol. 16 Savage XP Vol. 17 Savage XP Vol. 18 Savage XP Vol. 19 Savage XP Vol. 20 Savage XP Vol. 21 Savage XP Vol. 22 Savage XP Vol. 23 Savage XP Vol. 24 Savage XP Vol. 25 Savage XP Vol. 26 Savage XP Vol. 27 Savage XP Vol. 28 Savage XP Vol. 8a78ff9644
          -
          -
          -

          diff --git a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/README.md b/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/README.md deleted file mode 100644 index 2ee63a861229b68873561fa39bfa7c9a8b53b947..0000000000000000000000000000000000000000 --- a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/README.md +++ /dev/null @@ -1,164 +0,0 @@ -# Distributed Arcface Training in Pytorch - -This is a deep learning library that makes face recognition efficient, and effective, which can train tens of millions -identity on a single server. - -## Requirements - -- Install [pytorch](http://pytorch.org) (torch>=1.6.0), our doc for [install.md](docs/install.md). -- `pip install -r requirements.txt`. -- Download the dataset - from [https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_](https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_) - . - -## How to Training - -To train a model, run `train.py` with the path to the configs: - -### 1. Single node, 8 GPUs: - -```shell -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r50 -``` - -### 2. Multiple nodes, each node 8 GPUs: - -Node 0: - -```shell -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr="ip1" --master_port=1234 train.py train.py configs/ms1mv3_r50 -``` - -Node 1: - -```shell -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr="ip1" --master_port=1234 train.py train.py configs/ms1mv3_r50 -``` - -### 3.Training resnet2060 with 8 GPUs: - -```shell -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r2060.py -``` - -## Model Zoo - -- The models are available for non-commercial research purposes only. -- All models can be found in here. -- [Baidu Yun Pan](https://pan.baidu.com/s/1CL-l4zWqsI1oDuEEYVhj-g): e8pw -- [onedrive](https://1drv.ms/u/s!AswpsDO2toNKq0lWY69vN58GR6mw?e=p9Ov5d) - -### Performance on [**ICCV2021-MFR**](http://iccv21-mfr.com/) - -ICCV2021-MFR testset consists of non-celebrities so we can ensure that it has very few overlap with public available face -recognition training set, such as MS1M and CASIA as they mostly collected from online celebrities. -As the result, we can evaluate the FAIR performance for different algorithms. - -For **ICCV2021-MFR-ALL** set, TAR is measured on all-to-all 1:1 protocal, with FAR less than 0.000001(e-6). The -globalised multi-racial testset contains 242,143 identities and 1,624,305 images. - -For **ICCV2021-MFR-MASK** set, TAR is measured on mask-to-nonmask 1:1 protocal, with FAR less than 0.0001(e-4). -Mask testset contains 6,964 identities, 6,964 masked images and 13,928 non-masked images. -There are totally 13,928 positive pairs and 96,983,824 negative pairs. 
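The MFR numbers above come from a 1:1 verification protocol: every face pair gets a similarity score, and TAR is read off at the operating point where FAR stays below the stated threshold (1e-6 for the ALL set, 1e-4 for the MASK set). As a rough illustration only, not part of the original evaluation code, the metric can be computed from pair scores like this; `labels` and `scores` are placeholder arrays:

```python
# Illustrative sketch: TAR at a fixed FAR from 1:1 verification pair scores.
import numpy as np
from sklearn.metrics import roc_curve

def tar_at_far(labels, scores, target_far=1e-6):
    # labels: 1 for genuine (same-identity) pairs, 0 for impostor pairs
    # scores: higher means the pair is more likely to be the same identity
    fpr, tpr, _ = roc_curve(labels, scores)
    usable = fpr <= target_far
    return tpr[usable].max() if usable.any() else 0.0

# placeholder data, only to show the call signature
rng = np.random.default_rng(0)
labels = rng.integers(0, 2, size=100_000)
scores = rng.normal(loc=labels.astype(float), scale=1.0)
print("TAR@FAR=1e-6:", tar_at_far(labels, scores, target_far=1e-6))
```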
- -| Datasets | backbone | Training throughout | Size / MB | **ICCV2021-MFR-MASK** | **ICCV2021-MFR-ALL** | -| :---: | :--- | :--- | :--- |:--- |:--- | -| MS1MV3 | r18 | - | 91 | **47.85** | **68.33** | -| Glint360k | r18 | 8536 | 91 | **53.32** | **72.07** | -| MS1MV3 | r34 | - | 130 | **58.72** | **77.36** | -| Glint360k | r34 | 6344 | 130 | **65.10** | **83.02** | -| MS1MV3 | r50 | 5500 | 166 | **63.85** | **80.53** | -| Glint360k | r50 | 5136 | 166 | **70.23** | **87.08** | -| MS1MV3 | r100 | - | 248 | **69.09** | **84.31** | -| Glint360k | r100 | 3332 | 248 | **75.57** | **90.66** | -| MS1MV3 | mobilefacenet | 12185 | 7.8 | **41.52** | **65.26** | -| Glint360k | mobilefacenet | 11197 | 7.8 | **44.52** | **66.48** | - -### Performance on IJB-C and Verification Datasets - -| Datasets | backbone | IJBC(1e-05) | IJBC(1e-04) | agedb30 | cfp_fp | lfw | log | -| :---: | :--- | :--- | :--- | :--- |:--- |:--- |:--- | -| MS1MV3 | r18 | 92.07 | 94.66 | 97.77 | 97.73 | 99.77 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r18_fp16/training.log)| -| MS1MV3 | r34 | 94.10 | 95.90 | 98.10 | 98.67 | 99.80 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r34_fp16/training.log)| -| MS1MV3 | r50 | 94.79 | 96.46 | 98.35 | 98.96 | 99.83 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r50_fp16/training.log)| -| MS1MV3 | r100 | 95.31 | 96.81 | 98.48 | 99.06 | 99.85 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r100_fp16/training.log)| -| MS1MV3 | **r2060**| 95.34 | 97.11 | 98.67 | 99.24 | 99.87 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r2060_fp16/training.log)| -| Glint360k |r18-0.1 | 93.16 | 95.33 | 97.72 | 97.73 | 99.77 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r18_fp16_0.1/training.log)| -| Glint360k |r34-0.1 | 95.16 | 96.56 | 98.33 | 98.78 | 99.82 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r34_fp16_0.1/training.log)| -| Glint360k |r50-0.1 | 95.61 | 96.97 | 98.38 | 99.20 | 99.83 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r50_fp16_0.1/training.log)| -| Glint360k |r100-0.1 | 95.88 | 97.32 | 98.48 | 99.29 | 99.82 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r100_fp16_0.1/training.log)| - -[comment]: <> (More details see [model.md](docs/modelzoo.md) in docs.) - - -## [Speed Benchmark](docs/speed_benchmark.md) - -**Arcface Torch** can train large-scale face recognition training set efficiently and quickly. When the number of -classes in training sets is greater than 300K and the training is sufficient, partial fc sampling strategy will get same -accuracy with several times faster training performance and smaller GPU memory. -Partial FC is a sparse variant of the model parallel architecture for large sacle face recognition. Partial FC use a -sparse softmax, where each batch dynamicly sample a subset of class centers for training. In each iteration, only a -sparse part of the parameters will be updated, which can reduce a lot of GPU memory and calculations. With Partial FC, -we can scale trainset of 29 millions identities, the largest to date. Partial FC also supports multi-machine distributed -training and mixed precision training. 
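As a minimal sketch of the sampling idea described above (this is not the repository's Partial FC implementation, just an illustration with made-up sizes), each step keeps the class centers that appear in the current batch plus a random subset of negative centers, and the softmax is computed only over that sample:

```python
# Illustrative sketch of partial-FC style sampling, not the repo's implementation.
import torch
import torch.nn.functional as F

def sample_centers(labels, weight, sample_rate=0.1):
    num_classes = weight.size(0)
    positive = labels.unique()                          # classes present in the batch
    num_sample = max(int(num_classes * sample_rate), positive.numel())
    # candidate negatives: every class not in the batch, in random order
    mask = torch.ones(num_classes, dtype=torch.bool, device=weight.device)
    mask[positive] = False
    negatives = mask.nonzero(as_tuple=True)[0]
    negatives = negatives[torch.randperm(negatives.numel(), device=weight.device)]
    negatives = negatives[: num_sample - positive.numel()]
    index = torch.cat([positive, negatives])            # sampled class ids
    # remap each original label to its position inside the sampled subset
    id_map = torch.full((num_classes,), -1, dtype=torch.long, device=weight.device)
    id_map[index] = torch.arange(index.numel(), device=weight.device)
    return weight[index], id_map[labels]

# toy usage: 10k identities, 128-d embeddings, batch of 32
weight = torch.randn(10_000, 128, requires_grad=True)   # full class-center matrix
embeddings = torch.randn(32, 128)
labels = torch.randint(0, 10_000, (32,))
sub_weight, sub_labels = sample_centers(labels, weight, sample_rate=0.1)
logits = F.linear(F.normalize(embeddings), F.normalize(sub_weight))
loss = F.cross_entropy(logits, sub_labels)
loss.backward()                                         # gradients flow only to the sampled centers
```

In the full system the class-center matrix is additionally sharded across GPUs, which is where the memory savings reported in the tables below come from.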
- -![Image text](https://github.com/anxiangsir/insightface_arcface_log/blob/master/partial_fc_v2.png) - -More details see -[speed_benchmark.md](docs/speed_benchmark.md) in docs. - -### 1. Training speed of different parallel methods (samples / second), Tesla V100 32GB * 8. (Larger is better) - -`-` means training failed because of gpu memory limitations. - -| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | -| :--- | :--- | :--- | :--- | -|125000 | 4681 | 4824 | 5004 | -|1400000 | **1672** | 3043 | 4738 | -|5500000 | **-** | **1389** | 3975 | -|8000000 | **-** | **-** | 3565 | -|16000000 | **-** | **-** | 2679 | -|29000000 | **-** | **-** | **1855** | - -### 2. GPU memory cost of different parallel methods (MB per GPU), Tesla V100 32GB * 8. (Smaller is better) - -| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | -| :--- | :--- | :--- | :--- | -|125000 | 7358 | 5306 | 4868 | -|1400000 | 32252 | 11178 | 6056 | -|5500000 | **-** | 32188 | 9854 | -|8000000 | **-** | **-** | 12310 | -|16000000 | **-** | **-** | 19950 | -|29000000 | **-** | **-** | 32324 | - -## Evaluation ICCV2021-MFR and IJB-C - -More details see [eval.md](docs/eval.md) in docs. - -## Test - -We tested many versions of PyTorch. Please create an issue if you are having trouble. - -- [x] torch 1.6.0 -- [x] torch 1.7.1 -- [x] torch 1.8.0 -- [x] torch 1.9.0 - -## Citation - -``` -@inproceedings{deng2019arcface, - title={Arcface: Additive angular margin loss for deep face recognition}, - author={Deng, Jiankang and Guo, Jia and Xue, Niannan and Zafeiriou, Stefanos}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={4690--4699}, - year={2019} -} -@inproceedings{an2020partical_fc, - title={Partial FC: Training 10 Million Identities on a Single Machine}, - author={An, Xiang and Zhu, Xuhan and Xiao, Yang and Wu, Lan and Zhang, Ming and Gao, Yuan and Qin, Bin and - Zhang, Debing and Fu Ying}, - booktitle={Arxiv 2010.05222}, - year={2020} -} -``` diff --git a/spaces/liuyuan-pal/SyncDreamer/hf_demo/style.css b/spaces/liuyuan-pal/SyncDreamer/hf_demo/style.css deleted file mode 100644 index 031f78fdb75e7c517d62f6b9e240828ee4b6a912..0000000000000000000000000000000000000000 --- a/spaces/liuyuan-pal/SyncDreamer/hf_demo/style.css +++ /dev/null @@ -1,33 +0,0 @@ -#model-3d-out { - height: 400px; -} - -#plot-out { - height: 450px; -} - -#duplicate-button { - margin-left: auto; - color: #fff; - background: #1565c0; - } - -.footer { - margin-bottom: 45px; - margin-top: 10px; - text-align: center; - border-bottom: 1px solid #e5e5e5; -} -.footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(15px); - background: white; -} -.dark .footer { - border-color: #303030; -} -.dark .footer>p { - background: #0b0f19; -} \ No newline at end of file diff --git a/spaces/ljjggr/bingo/src/components/ui/dialog.tsx b/spaces/ljjggr/bingo/src/components/ui/dialog.tsx deleted file mode 100644 index 925e77fe7858fb218b5115b4e225174a886e0f02..0000000000000000000000000000000000000000 --- a/spaces/ljjggr/bingo/src/components/ui/dialog.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' -import * as DialogPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Dialog = DialogPrimitive.Root - -const DialogTrigger = DialogPrimitive.Trigger - -const DialogPortal = ({ - className, - children, - ...props -}: 
DialogPrimitive.DialogPortalProps) => ( - -
          - {children} -
          -
          -) -DialogPortal.displayName = DialogPrimitive.Portal.displayName - -const DialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogOverlay.displayName = DialogPrimitive.Overlay.displayName - -const DialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - {children} - - - Close - - - -)) -DialogContent.displayName = DialogPrimitive.Content.displayName - -const DialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
          -) -DialogHeader.displayName = 'DialogHeader' - -const DialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
          -) -DialogFooter.displayName = 'DialogFooter' - -const DialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogTitle.displayName = DialogPrimitive.Title.displayName - -const DialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogDescription.displayName = DialogPrimitive.Description.displayName - -export { - Dialog, - DialogTrigger, - DialogContent, - DialogHeader, - DialogFooter, - DialogTitle, - DialogDescription -} diff --git a/spaces/ltgoslo/ssa-perin/data/field/__init__.py b/spaces/ltgoslo/ssa-perin/data/field/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ludusc/latent-space-theories/backend/utils.py b/spaces/ludusc/latent-space-theories/backend/utils.py deleted file mode 100644 index 70b6a46b601df5c0bae786c1b3df90493e1ac3d8..0000000000000000000000000000000000000000 --- a/spaces/ludusc/latent-space-theories/backend/utils.py +++ /dev/null @@ -1,379 +0,0 @@ -import streamlit as st -import pickle - -import io -from typing import List, Optional - -import markdown -import matplotlib -import matplotlib.pyplot as plt -import pandas as pd -import plotly.graph_objects as go -import streamlit as st -from plotly import express as px -from plotly.subplots import make_subplots -from tqdm import trange - -import torch -from transformers import AutoFeatureExtractor, AutoModelForImageClassification - -@st.cache(allow_output_mutation=True) -# @st.cache_resource -def load_dataset(data_index): - with open(f'./data/preprocessed_image_net/val_data_{data_index}.pkl', 'rb') as file: - dataset = pickle.load(file) - return dataset - -@st.cache(allow_output_mutation=True) -# @st.cache_resource -def load_dataset_dict(): - dataset_dict = {} - progress_empty = st.empty() - text_empty = st.empty() - text_empty.write("Loading datasets...") - progress_bar = progress_empty.progress(0.0) - for data_index in trange(5): - dataset_dict[data_index] = load_dataset(data_index) - progress_bar.progress((data_index+1)/5) - progress_empty.empty() - text_empty.empty() - return dataset_dict - - -# @st.cache_data -@st.cache(allow_output_mutation=True) -def load_image(image_id): - dataset = load_dataset(image_id//10000) - image = dataset[image_id%10000] - return image - -# @st.cache_data -@st.cache(allow_output_mutation=True) -def load_images(image_ids): - images = [] - for image_id in image_ids: - image = load_image(image_id) - images.append(image) - return images - - -@st.cache(allow_output_mutation=True, suppress_st_warning=True, show_spinner=False) -# @st.cache_resource -def load_model(model_name): - with st.spinner(f"Loading {model_name} model! 
This process might take 1-2 minutes..."): - if model_name == 'ResNet': - model_file_path = 'microsoft/resnet-50' - feature_extractor = AutoFeatureExtractor.from_pretrained(model_file_path, crop_pct=1.0) - model = AutoModelForImageClassification.from_pretrained(model_file_path) - model.eval() - elif model_name == 'ConvNeXt': - model_file_path = 'facebook/convnext-tiny-224' - feature_extractor = AutoFeatureExtractor.from_pretrained(model_file_path, crop_pct=1.0) - model = AutoModelForImageClassification.from_pretrained(model_file_path) - model.eval() - else: - model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=True) - model.eval() - feature_extractor = None - return model, feature_extractor - - -def make_grid(cols=None,rows=None): - grid = [0]*rows - for i in range(rows): - with st.container(): - grid[i] = st.columns(cols) - return grid - - -def use_container_width_percentage(percentage_width:int = 75): - max_width_str = f"max-width: {percentage_width}%;" - st.markdown(f""" - - """, - unsafe_allow_html=True, - ) - -matplotlib.use("Agg") -COLOR = "#31333f" -BACKGROUND_COLOR = "#ffffff" - - -def grid_demo(): - """Main function. Run this to run the app""" - st.sidebar.title("Layout and Style Experiments") - st.sidebar.header("Settings") - st.markdown( - """ -# Layout and Style Experiments - -The basic question is: Can we create a multi-column dashboard with plots, numbers and text using -the [CSS Grid](https://gridbyexample.com/examples)? - -Can we do it with a nice api? -Can have a dark theme? -""" - ) - - select_block_container_style() - add_resources_section() - - # My preliminary idea of an API for generating a grid - with Grid("1 1 1", color=COLOR, background_color=BACKGROUND_COLOR) as grid: - grid.cell( - class_="a", - grid_column_start=2, - grid_column_end=3, - grid_row_start=1, - grid_row_end=2, - ).markdown("# This is A Markdown Cell") - grid.cell("b", 2, 3, 2, 3).text("The cell to the left is a dataframe") - grid.cell("c", 3, 4, 2, 3).plotly_chart(get_plotly_fig()) - grid.cell("d", 1, 2, 1, 3).dataframe(get_dataframe()) - grid.cell("e", 3, 4, 1, 2).markdown( - "Try changing the **block container style** in the sidebar!" 
- ) - grid.cell("f", 1, 3, 3, 4).text( - "The cell to the right is a matplotlib svg image" - ) - grid.cell("g", 3, 4, 3, 4).pyplot(get_matplotlib_plt()) - - -def add_resources_section(): - """Adds a resources section to the sidebar""" - st.sidebar.header("Add_resources_section") - st.sidebar.markdown( - """ -- [gridbyexample.com] (https://gridbyexample.com/examples/) -""" - ) - - -class Cell: - """A Cell can hold text, markdown, plots etc.""" - - def __init__( - self, - class_: str = None, - grid_column_start: Optional[int] = None, - grid_column_end: Optional[int] = None, - grid_row_start: Optional[int] = None, - grid_row_end: Optional[int] = None, - ): - self.class_ = class_ - self.grid_column_start = grid_column_start - self.grid_column_end = grid_column_end - self.grid_row_start = grid_row_start - self.grid_row_end = grid_row_end - self.inner_html = "" - - def _to_style(self) -> str: - return f""" -.{self.class_} {{ - grid-column-start: {self.grid_column_start}; - grid-column-end: {self.grid_column_end}; - grid-row-start: {self.grid_row_start}; - grid-row-end: {self.grid_row_end}; -}} -""" - - def text(self, text: str = ""): - self.inner_html = text - - def markdown(self, text): - self.inner_html = markdown.markdown(text) - - def dataframe(self, dataframe: pd.DataFrame): - self.inner_html = dataframe.to_html() - - def plotly_chart(self, fig): - self.inner_html = f""" - - -

This should have been a plotly plot. - But since *script* tags are removed when inserting Markdown/HTML I cannot get it to work. - But I could potentially save to svg and insert that.

          -
          - - -""" - - def pyplot(self, fig=None, **kwargs): - string_io = io.StringIO() - plt.savefig(string_io, format="svg", fig=(2, 2)) - svg = string_io.getvalue()[215:] - plt.close(fig) - self.inner_html = '
          ' + svg + "
          " - - def _to_html(self): - return f"""
          {self.inner_html}
          """ - - -class Grid: - """A (CSS) Grid""" - - def __init__( - self, - template_columns="1 1 1", - gap="10px", - background_color=COLOR, - color=BACKGROUND_COLOR, - ): - self.template_columns = template_columns - self.gap = gap - self.background_color = background_color - self.color = color - self.cells: List[Cell] = [] - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - st.markdown(self._get_grid_style(), unsafe_allow_html=True) - st.markdown(self._get_cells_style(), unsafe_allow_html=True) - st.markdown(self._get_cells_html(), unsafe_allow_html=True) - - def _get_grid_style(self): - return f""" - -""" - - def _get_cells_style(self): - return ( - "" - ) - - def _get_cells_html(self): - return ( - '
          ' - + "\n".join([cell._to_html() for cell in self.cells]) - + "
          " - ) - - def cell( - self, - class_: str = None, - grid_column_start: Optional[int] = None, - grid_column_end: Optional[int] = None, - grid_row_start: Optional[int] = None, - grid_row_end: Optional[int] = None, - ): - cell = Cell( - class_=class_, - grid_column_start=grid_column_start, - grid_column_end=grid_column_end, - grid_row_start=grid_row_start, - grid_row_end=grid_row_end, - ) - self.cells.append(cell) - return cell - - -def select_block_container_style(): - """Add selection section for setting setting the max-width and padding - of the main block container""" - st.sidebar.header("Block Container Style") - max_width_100_percent = st.sidebar.checkbox("Max-width: 100%?", False) - if not max_width_100_percent: - max_width = st.sidebar.slider("Select max-width in px", 100, 2000, 1200, 100) - else: - max_width = 1200 - dark_theme = st.sidebar.checkbox("Dark Theme?", False) - padding_top = st.sidebar.number_input("Select padding top in rem", 0, 200, 5, 1) - padding_right = st.sidebar.number_input("Select padding right in rem", 0, 200, 1, 1) - padding_left = st.sidebar.number_input("Select padding left in rem", 0, 200, 1, 1) - padding_bottom = st.sidebar.number_input( - "Select padding bottom in rem", 0, 200, 10, 1 - ) - if dark_theme: - global COLOR - global BACKGROUND_COLOR - BACKGROUND_COLOR = "rgb(17,17,17)" - COLOR = "#fff" - - _set_block_container_style( - max_width, - max_width_100_percent, - padding_top, - padding_right, - padding_left, - padding_bottom, - ) - - -def _set_block_container_style( - max_width: int = 1200, - max_width_100_percent: bool = False, - padding_top: int = 5, - padding_right: int = 1, - padding_left: int = 1, - padding_bottom: int = 10, -): - if max_width_100_percent: - max_width_str = f"max-width: 100%;" - else: - max_width_str = f"max-width: {max_width}px;" - st.markdown( - f""" - -""", - unsafe_allow_html=True, - ) - - -# @st.cache -# def get_dataframe() -> pd.DataFrame(): -# """Dummy DataFrame""" -# data = [ -# {"quantity": 1, "price": 2}, -# {"quantity": 3, "price": 5}, -# {"quantity": 4, "price": 8}, -# ] -# return pd.DataFrame(data) - - -# def get_plotly_fig(): -# """Dummy Plotly Plot""" -# return px.line(data_frame=get_dataframe(), x="quantity", y="price") - - -# def get_matplotlib_plt(): -# get_dataframe().plot(kind="line", x="quantity", y="price", figsize=(5, 3)) diff --git a/spaces/luodian/LoRA-DreamBooth-Training-UI/app.py b/spaces/luodian/LoRA-DreamBooth-Training-UI/app.py deleted file mode 100644 index 1b47590d28504c5832a3fbb2fcd4f5ef121cf7d8..0000000000000000000000000000000000000000 --- a/spaces/luodian/LoRA-DreamBooth-Training-UI/app.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import os - -import gradio as gr -import torch - -from app_inference import create_inference_demo -from app_training import create_training_demo -from app_upload import create_upload_demo -from inference import InferencePipeline -from trainer import Trainer - -TITLE = '# LoRA DreamBooth Training UI' - -ORIGINAL_SPACE_ID = 'lora-library/LoRA-DreamBooth-Training-UI' -SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID) -SHARED_UI_WARNING = f'''# Attention - This Space doesn't work in this shared UI. You can duplicate and use it with a paid private T4 GPU. - -
          Duplicate Space
          -''' - -if os.getenv('SYSTEM') == 'spaces' and SPACE_ID != ORIGINAL_SPACE_ID: - SETTINGS = f'Settings' -else: - SETTINGS = 'Settings' -CUDA_NOT_AVAILABLE_WARNING = f'''# Attention - Running on CPU. -
          -You can assign a GPU in the {SETTINGS} tab if you are running this on HF Spaces. -"T4 small" is sufficient to run this demo. -
          -''' - -HF_TOKEN_NOT_SPECIFIED_WARNING = f'''# Attention - The environment variable `HF_TOKEN` is not specified. Please set it to a Hugging Face token with write permission. -
          -You can check and create your Hugging Face tokens here. -You can specify environment variables in the "Repository secrets" section of the {SETTINGS} tab. -
          -''' - -HF_TOKEN = os.getenv('HF_TOKEN') - - -def show_warning(warning_text: str) -> gr.Blocks: - with gr.Blocks() as demo: - with gr.Box(): - gr.Markdown(warning_text) - return demo - - -pipe = InferencePipeline(HF_TOKEN) -trainer = Trainer(HF_TOKEN) - -with gr.Blocks(css='style.css') as demo: - if os.getenv('IS_SHARED_UI'): - show_warning(SHARED_UI_WARNING) - if not torch.cuda.is_available(): - show_warning(CUDA_NOT_AVAILABLE_WARNING) - if not HF_TOKEN: - show_warning(HF_TOKEN_NOT_SPECIFIED_WARNING) - - gr.Markdown(TITLE) - with gr.Tabs(): - with gr.TabItem('Train'): - create_training_demo(trainer, pipe) - with gr.TabItem('Test'): - create_inference_demo(pipe, HF_TOKEN) - with gr.TabItem('Upload'): - gr.Markdown(''' - - You can use this tab to upload models later if you choose not to upload models in training time or if upload in training time failed. - ''') - create_upload_demo(HF_TOKEN) - -demo.queue(max_size=1).launch(share=False) diff --git a/spaces/luohy/SAIL-7B/README.md b/spaces/luohy/SAIL-7B/README.md deleted file mode 100644 index 732c7cc75f416b97418f4373a4197b6875e6b3b5..0000000000000000000000000000000000000000 --- a/spaces/luohy/SAIL-7B/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: SAIL 7B -emoji: 🐨 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/functional/operators/operator_adaptors.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/functional/operators/operator_adaptors.h deleted file mode 100644 index 67a1f6e37d6180c0ec35f19d11a134f89c518925..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/detail/functional/operators/operator_adaptors.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include -#include -#include -#include - -#include - -namespace thrust -{ -namespace detail -{ -namespace functional -{ - -// Adapts a transparent unary functor from functional.h (e.g. thrust::negate<>) -// into the Eval interface. 
-template -struct transparent_unary_operator -{ - template - using operator_type = UnaryFunctor; - - template - using argument = - typename thrust::detail::eval_if< - thrust::tuple_size::value != 1, - thrust::detail::identity_, - thrust::detail::functional::argument_helper<0, Env> - >::type; - - template - struct result_type_impl - { - using type = decltype( - std::declval()(std::declval>())); - }; - - template - using result_type = - typename thrust::detail::eval_if< - std::is_same>::value, - thrust::detail::identity_, - result_type_impl - >::type; - - template - struct result - { - using op_type = UnaryFunctor; - using type = result_type; - }; - - template - __host__ __device__ - result_type eval(Env&& e) const - THRUST_RETURNS(UnaryFunctor{}(thrust::get<0>(THRUST_FWD(e)))) -}; - - -// Adapts a transparent binary functor from functional.h (e.g. thrust::less<>) -// into the Eval interface. -template -struct transparent_binary_operator -{ - template - using operator_type = BinaryFunctor; - - template - using first_argument = - typename thrust::detail::eval_if< - thrust::tuple_size::value != 2, - thrust::detail::identity_, - thrust::detail::functional::argument_helper<0, Env> - >::type; - - template - using second_argument = - typename thrust::detail::eval_if< - thrust::tuple_size::value != 2, - thrust::detail::identity_, - thrust::detail::functional::argument_helper<1, Env> - >::type; - - template - struct result_type_impl - { - using type = decltype( - std::declval()(std::declval>(), - std::declval>())); - }; - - template - using result_type = - typename thrust::detail::eval_if< - (std::is_same>::value || - std::is_same>::value), - thrust::detail::identity_, - result_type_impl - >::type; - - template - struct result - { - using op_type = BinaryFunctor; - using type = result_type; - }; - - template - __host__ __device__ - result_type eval(Env&& e) const - THRUST_RETURNS(BinaryFunctor{}(thrust::get<0>(e), thrust::get<1>(e))) -}; - -} // end functional -} // end detail -} // end thrust - diff --git a/spaces/mascIT/AgeGuesser/yolov5/export.py b/spaces/mascIT/AgeGuesser/yolov5/export.py deleted file mode 100644 index 2be3fd5943666328c6d5525f29dd095c4303b575..0000000000000000000000000000000000000000 --- a/spaces/mascIT/AgeGuesser/yolov5/export.py +++ /dev/null @@ -1,564 +0,0 @@ -import argparse -import json -import os -import platform -import subprocess -import sys -import time -import warnings -from pathlib import Path - -import pandas as pd -import torch -import yaml -from torch.utils.mobile_optimizer import optimize_for_mobile - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': - ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.experimental import attempt_load -from models.yolo import Detect -from utils.datasets import LoadImages -from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr, - file_size, print_args, url2file) -from utils.torch_utils import select_device - - -def export_formats(): - # YOLOv5 export formats - x = [ - ['PyTorch', '-', '.pt', True, True], - ['TorchScript', 'torchscript', '.torchscript', True, True], - ['ONNX', 'onnx', '.onnx', True, True], - ['OpenVINO', 'openvino', '_openvino_model', True, False], - ['TensorRT', 'engine', '.engine', False, True], - ['CoreML', 'coreml', '.mlmodel', True, False], - ['TensorFlow SavedModel', 'saved_model', 
'_saved_model', True, True], - ['TensorFlow GraphDef', 'pb', '.pb', True, True], - ['TensorFlow Lite', 'tflite', '.tflite', True, False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], - ['TensorFlow.js', 'tfjs', '_web_model', False, False],] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) - - -def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): - # YOLOv5 TorchScript model export - try: - LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript') - - ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} - extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html - optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) - else: - ts.save(str(f), _extra_files=extra_files) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'{prefix} export failure: {e}') - - -def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): - # YOLOv5 ONNX export - try: - check_requirements(('onnx',)) - import onnx - - LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') - f = file.with_suffix('.onnx') - - torch.onnx.export( - model.cpu() if dynamic else model, # --dynamic only compatible with cpu - im.cpu() if dynamic else im, - f, - verbose=False, - opset_version=opset, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, - input_names=['images'], - output_names=['output'], - dynamic_axes={ - 'images': { - 0: 'batch', - 2: 'height', - 3: 'width'}, # shape(1,3,640,640) - 'output': { - 0: 'batch', - 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - - # Metadata - d = {'stride': int(max(model.stride)), 'names': model.names} - for k, v in d.items(): - meta = model_onnx.metadata_props.add() - meta.key, meta.value = k, str(v) - onnx.save(model_onnx, f) - - # Simplify - if simplify: - try: - check_requirements(('onnx-simplifier',)) - import onnxsim - - LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx, - dynamic_input_shape=dynamic, - input_shapes={'images': list(im.shape)} if dynamic else None) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - LOGGER.info(f'{prefix} simplifier failure: {e}') - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'{prefix} export failure: {e}') - - -def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): - # YOLOv5 OpenVINO export - try: - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', f'_openvino_model{os.sep}') - - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.check_output(cmd.split()) # export - with open(Path(f) / 
file.with_suffix('.yaml').name, 'w') as g: - yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g) # add metadata.yaml - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): - # YOLOv5 CoreML export - try: - check_requirements(('coremltools',)) - import coremltools as ct - - LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') - f = file.with_suffix('.mlmodel') - - ts = torch.jit.trace(model, im, strict=False) # TorchScript model - ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) - bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) - if bits < 32: - if platform.system() == 'Darwin': # quantization only supported on macOS - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning - ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) - else: - print(f'{prefix} quantization only supported on macOS, skipping...') - ct_model.save(f) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return ct_model, f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - return None, None - - -def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): - # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - try: - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' - try: - import tensorrt as trt - except Exception: - if platform.system() == 'Linux': - check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) - import tensorrt as trt - - if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 - grid = model.model[-1].anchor_grid - model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, train, False, simplify) # opset 12 - model.model[-1].anchor_grid = grid - else: # TensorRT >= 8 - check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 13, train, False, simplify) # opset 13 - onnx = file.with_suffix('.onnx') - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - LOGGER.info(f'{prefix} Network Description:') - for inp in inputs: - 
LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') - - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') - if builder.platform_has_fast_fp16 and half: - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_saved_model(model, - im, - file, - dynamic, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25, - keras=False, - prefix=colorstr('TensorFlow SavedModel:')): - # YOLOv5 TensorFlow SavedModel export - try: - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - from models.tf import TFDetect, TFModel - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = str(file).replace('.pt', '_saved_model') - batch_size, ch, *imgsz = list(im.shape) # BCHW - - tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) - im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow - _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) - outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) - keras_model.trainable = False - keras_model.summary() - if keras: - keras_model.save(f, save_format='tf') - else: - spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(spec) - frozen_func = convert_variables_to_constants_v2(m) - tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) - tfm.__call__(im) - tf.saved_model.save(tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) - if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions()) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return keras_model, f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - return None, None - - -def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): - # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow - try: - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = file.with_suffix('.pb') - - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) - frozen_func = convert_variables_to_constants_v2(m) - frozen_func.graph.as_graph_def() - tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception 
as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): - # YOLOv5 TensorFlow Lite export - try: - import tensorflow as tf - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - batch_size, ch, *imgsz = list(im.shape) # BCHW - f = str(file).replace('.pt', '-fp16.tflite') - - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - converter.target_spec.supported_types = [tf.float16] - converter.optimizations = [tf.lite.Optimize.DEFAULT] - if int8: - from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data - converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.target_spec.supported_types = [] - converter.inference_input_type = tf.uint8 # or tf.int8 - converter.inference_output_type = tf.uint8 # or tf.int8 - converter.experimental_new_quantizer = True - f = str(file).replace('.pt', '-int8.tflite') - if nms or agnostic_nms: - converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) - - tflite_model = converter.convert() - open(f, "wb").write(tflite_model) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_edgetpu(file, prefix=colorstr('Edge TPU:')): - # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ - try: - cmd = 'edgetpu_compiler --version' - help_url = 'https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: - LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. 
Attempting install from {help_url}') - sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system - for c in ( - 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', - 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', - 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): - subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) - ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] - - LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model - f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - - cmd = f"edgetpu_compiler -s -o {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): - # YOLOv5 TensorFlow.js export - try: - check_requirements(('tensorflowjs',)) - import re - - import tensorflowjs as tfjs - - LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') - f = str(file).replace('.pt', '_web_model') # js dir - f_pb = file.with_suffix('.pb') # *.pb path - f_json = f'{f}/model.json' # *.json path - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) - - with open(f_json) as j: - json = j.read() - with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order - subst = re.sub( - r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' - r'"Identity_1": {"name": "Identity_1"}, ' - r'"Identity_2": {"name": "Identity_2"}, ' - r'"Identity_3": {"name": "Identity_3"}}}', json) - j.write(subst) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -@torch.no_grad() -def run( - data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=(640, 640), # image (height, width) - batch_size=1, # batch size - device='cpu', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx'), # include formats - half=False, # FP16 half-precision export - inplace=False, # set YOLOv5 Detect() inplace=True - train=False, # model.train() mode - keras=False, # use Keras - optimize=False, # TorchScript: optimize for mobile - int8=False, # CoreML/TF INT8 quantization - dynamic=False, # ONNX/TF: dynamic axes - simplify=False, # ONNX: simplify model - opset=12, # ONNX: opset version - verbose=False, # TensorRT: verbose log - workspace=4, # TensorRT: workspace size (GB) - nms=False, # TF: add NMS to model - agnostic_nms=False, # TF: add agnostic NMS to model - topk_per_class=100, # TF.js NMS: topk per class to keep - topk_all=100, # TF.js NMS: topk for all classes to keep - iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25, # TF.js NMS: confidence threshold -): - t = time.time() - include = [x.lower() for x in include] # to lowercase - fmts = tuple(export_formats()['Argument'][1:]) # --include arguments - flags = [x in include for x in fmts] - assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' - jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags # export booleans - file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights - - # Load PyTorch model - device = select_device(device) - if half: - assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' - assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' - model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model - nc, names = model.nc, model.names # number of classes, class names - - # Checks - imgsz *= 2 if len(imgsz) == 1 else 1 # expand - assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}' - - # Input - gs = int(max(model.stride)) # grid size (max stride) - imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples - im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection - - # Update model - model.train() if train else model.eval() # training mode = no Detect() layer grid construction - for k, m in model.named_modules(): - if isinstance(m, Detect): - m.inplace = inplace - m.onnx_dynamic = dynamic - m.export = True - - for _ in range(2): - y = model(im) # dry runs - if half and not coreml: - im, model = im.half(), model.half() # to FP16 - shape = tuple(y[0].shape) # model output shape - LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") - - # Exports - f = [''] * 10 # exported filenames - warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - if jit: - f[0] = export_torchscript(model, im, file, optimize) - if engine: # TensorRT required before ONNX - f[1] = export_engine(model, im, file, train, half, simplify, workspace, verbose) - if onnx or xml: # OpenVINO requires ONNX - f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify) - if xml: # OpenVINO - f[3] = export_openvino(model, file, half) - if coreml: - _, f[4] = export_coreml(model, im, file, int8, half) - - # TensorFlow Exports - if any((saved_model, pb, tflite, edgetpu, tfjs)): - if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 - check_requirements(('flatbuffers==1.12',)) # required before `import 
tensorflow` - assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' - model, f[5] = export_saved_model(model.cpu(), - im, - file, - dynamic, - tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, - topk_per_class=topk_per_class, - topk_all=topk_all, - iou_thres=iou_thres, - conf_thres=conf_thres, - keras=keras) - if pb or tfjs: # pb prerequisite to tfjs - f[6] = export_pb(model, file) - if tflite or edgetpu: - f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) - if edgetpu: - f[8] = export_edgetpu(file) - if tfjs: - f[9] = export_tfjs(file) - - # Finish - f = [str(x) for x in f if x] # filter out '' and None - if any(f): - h = '--half' if half else '' # --half FP16 inference arg - LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python detect.py --weights {f[-1]} {h}" - f"\nValidate: python val.py --weights {f[-1]} {h}" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" - f"\nVisualize: https://netron.app") - return f # return list of exported files/dirs - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--train', action='store_true', help='model.train() mode') - parser.add_argument('--keras', action='store_true', help='TF: use Keras') - parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') - parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') - parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') - parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') - parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') - parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') - parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') - parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') - parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') - parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') - parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument('--include', - nargs='+', - default=['torchscript', 'onnx'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') - opt = 
parser.parse_args() - print_args(vars(opt)) - return opt - - -def main(opt): - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) \ No newline at end of file diff --git a/spaces/merve/alpaca-tr-crowdsource/app.py b/spaces/merve/alpaca-tr-crowdsource/app.py deleted file mode 100644 index a72e70428ac4c61445d20751fc075ecaf2801e89..0000000000000000000000000000000000000000 --- a/spaces/merve/alpaca-tr-crowdsource/app.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import gradio as gr -from gradio import FlaggingCallback -from gradio.components import IOComponent - -from datasets import load_dataset -from typing import List, Optional, Any -import argilla as rg -import os - -def load_data(idx): - df = load_dataset("merve/turkish_instructions", split="train").to_pandas() - sample = df.iloc[int(idx)] - instruction = sample[1] - - if sample[2]: - input_sample = sample[2] - else: - input_sample="-" - response = sample[3] - return instruction, input_sample, response - -def create_record(text, feedback): - - status = "Validated" if feedback == "Doğru" else "Default" - instruction, input_sample, response = load_data(int(text)) - - fields = { - "talimat": instruction, - "girdi": input_sample, - "çıktı": response} - - # the label will come from the flag object in Gradio - label = "True" - - record = rg.TextClassificationRecord( - inputs=fields, - annotation=label, - status=status, - metadata={"feedback": feedback} - ) - - print(record) - return record - - - - -class ArgillaLogger(FlaggingCallback): - def __init__(self, api_url, api_key, dataset_name): - rg.init(api_url=api_url, api_key=api_key) - self.dataset_name = dataset_name - def setup(self, components: List[IOComponent], flagging_dir: str): - pass - def flag( - self, - flag_data: List[Any], - flag_option: Optional[str] = None, - flag_index: Optional[int] = None, - username: Optional[str] = None, - ) -> int: - text = flag_data[0] - inference = flag_data[1] - rg.log(name=self.dataset_name, records=create_record(text, flag_option)) - - -idx_input = gr.Slider(minimum=0, maximum=51564, label="Satır") -instruction = gr.Textbox(label="Talimat") -input_sample = gr.Textbox(label="Girdi") -response = gr.Textbox(label="Çıktı") - - -gr.Interface( - load_data, - title = "ALPACA Veriseti Düzeltme Arayüzü", - description = "Bir satır sayısı verip örnek alın. Çeviride gözünüze doğru gelmeyen bir şey olursa işaretleyin.", - allow_flagging="manual", - flagging_callback=ArgillaLogger( - api_url="https://pro.argilla.io", - api_key=os.getenv("API_KEY"), - dataset_name="alpaca-flags" - ), - inputs=[idx_input], - outputs=[instruction, input_sample, response], - flagging_options=["Doğru", "Yanlış", "Belirsiz"], - theme="gradio/soft" -).launch() \ No newline at end of file diff --git a/spaces/merve/data-leak/public/hidden-bias/script.js b/spaces/merve/data-leak/public/hidden-bias/script.js deleted file mode 100644 index 526901a0178a3ef069380410dd33fdc0334f2bae..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/public/hidden-bias/script.js +++ /dev/null @@ -1,467 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - -var ttSel = d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - -var colors = { - m: '#7DDAD3', - f: '#9B86EF', - h: '#F0BD80', - l: '#FF777B', - grey: '#ccc', -} - - -var totalWidth = width = d3.select('#graph').node().offsetWidth -var r = 40 - -var sel = d3.select('#graph').html('') - .append('div') - -var extraWidth = d3.clamp(500, innerHeight - 150, innerWidth - 500) -var scale = extraWidth/500 -scale = 1 -sel.st({transform: `scale(${scale})`, transformOrigin: '0% 0%'}) - -var c = d3.conventions({ - sel, - totalWidth, - totalHeight: totalWidth, - margin: {left: 25, right: 7}, - layers: 'sd', -}) -var divSel = c.layers[1] - -c.x.domain([1, 4]).clamp(true).interpolate(d3.interpolateRound) -c.y.domain([1, 4]).clamp(true).interpolate(d3.interpolateRound) - -c.xAxis.ticks(3).tickFormat(d3.format('.1f')) -c.yAxis.ticks(3).tickFormat(d3.format('.1f')) -d3.drawAxis(c) - -var axis2Sel= c.svg.append('g.axis').append('line') - .translate(Math.round(c.y(2)) + .5, 1) - .at({x2: c.width, stroke: '#000', opacity: 0}) - -var meanGPADiff = .6 - -var seed = new Math.seedrandom('hii') -var students = d3.range(150).map((d, index) => { - var collegeGPA = d3.randomUniform.source(seed)(1, 4)() - - // if (index == 93) collegeGPA = 2.05 - // if (index == 87) collegeGPA = 2.15 - // if (index == 32) collegeGPA = 2.25 - if (index == 131) collegeGPA = 3.9 - - // var hsGPA = collegeGPA*d3.randomNormal(1, .4)() - var hsGPA = collegeGPA + d3.randomNormal.source(seed)(meanGPADiff, .8)() - var hsGPAadjusted = hsGPA - meanGPADiff - - var rand = d3.randomUniform.source(seed)(0, 1) - - var isMale = rand() < .5 - var name = names[isMale ? 'm' : 'f'][Math.floor(d/2)] - var lastName = names.last[d] - var maleOffset = rand()*(isMale ? 1 : -1)*.6 - - // if (index == 47) name = 'Mia' - // if (index == 82) name = 'Mason' - - - var compGPA0 = lerp(hsGPAadjusted, collegeGPA, rand()*.7) + maleOffset - var compGPA1 = lerp(compGPA0, collegeGPA + maleOffset, rand()*1.1) - var compGPA2 = compGPA1 + rand()/4 - 1/4/2 - // var compGPA0 = collegeGPA + d3.randomNormal.source(seed)(0, .5)() - // var compGPA1 = collegeGPA + d3.randomNormal.source(seed)(0, .3)() - - if (index == 69){ - compGPA1 = 2.0 - } - if (index == 37){ - compGPA1 = 2.0 - } - - - var isLowIncome = rand() < .5 - - var inteviewGPA = collegeGPA + d3.randomNormal.source(seed)(0, .15)() - var inteviewGPAbias = inteviewGPA + rand()*(isLowIncome ? 
-1 : 1)*.5 - - // if (index == 115) name = 'Mason' - // if (index == 32) name = 'Mia' - - if (name == 'Camila') name = 'Mia' - - - return {name, index, lastName, collegeGPA, hsGPA, hsGPAadjusted, compGPA0, compGPA1, compGPA2, isMale, isLowIncome, inteviewGPA, inteviewGPAbias} -}) - -students = _.sortBy(students, d => d.collegeGPA) - -students = students.filter(d => { - return d3.entries(d).every(({key, value}) => { - if (!key.includes('GPA')) return true - - return 1 < value && value < 4.0 - }) -}) - - -c.svg.append('path') - .at({ - d: ['M', 0, c.height, 'L', c.width, 0].join(' '), - stroke: '#ccc', - strokeWidth: 2, - strokeDasharray: '4 2' - }) - -!(function(){ - // return window.annotationSel = d3.select(null) - var isDrag = 0 - if (!isDrag) annotations.forEach(d => d.text = d.html ? '' : d.text) - if (isDrag){ - d3.select('#sections').st({pointerEvents: 'none'}) - } - - // copy('window.annotations = ' + JSON.stringify(annotations, null, 2)) - var swoopy = d3.swoopyDrag() - .x(d => c.x(d.x)) - .y(d => c.y(d.y)) - .draggable(isDrag) - .annotations(annotations) - .on('drag', d => { - - }) - - - var htmlAnnoSel = divSel.appendMany('div.annotation', annotations.filter(d => d.html)) - .translate(d => [c.x(d.x), c.y(d.y)]).st({position: 'absolute', opacity: 0}) - .append('div') - .translate(d => d.textOffset) - .html(d => d.html) - .st({width: 150}) - - - - var swoopySel = c.svg.append('g.annotations').call(swoopy) - - c.svg.append('marker') - .attr('id', 'arrow') - .attr('viewBox', '-10 -10 20 20') - .attr('markerWidth', 20) - .attr('markerHeight', 20) - .attr('orient', 'auto') - .append('path') - .attr('d', 'M-6.75,-6.75 L 0,0 L -6.75,6.75') - - swoopySel.selectAll('path') - .attr('marker-end', 'url(#arrow)') - .st({'opacity': d => d.path == 'M 0 0' ? 0 : 1}) - window.annotationSel = swoopySel.selectAll('g') - .st({fontSize: 12, opacity: d => d.slide == 0 ? 1 : 0}) - - window.annotationSel = d3.selectAll('g.annotations g, div.annotation') - - swoopySel.selectAll('text') - .each(function(d){ - d3.select(this) - .text('') //clear existing text - .tspans(d3.wordwrap(d.text, d.width || 20), 13) //wrap after 20 char - }) - })() - - - -students = _.sortBy(students, d => d.collegeGPA) -var lineSel = c.svg.appendMany('path', students) - .translate(d => [c.x(d.hsGPA), c.y(d.collegeGPA)]) - .at({ - // fill: d => d.hsGPA > d.collegeGPA ? 'blue' : 'orange', - fill: '#eee', - stroke: '#aaa', - strokeWidth: .5, - opacity: 0, - // strokeWidth: 1/scale, - }) - - -var circleSel = c.svg.appendMany('g', students) - .translate(d => [c.x(d.collegeGPA), c.y(d.hsGPA)]) - .call(d3.attachTooltip) - .on('mouseover', d => { - var html = '' - html += `
          ${d.name} ${d.lastName}
          ` - - if (curSlide.circleFill == 'gender'){ - html += `${d.isMale ? 'Male' : 'Female'}` - } - - if (curSlide.circleFill == 'income'){ - html += `${d.isLowIncome ? 'Low Income' : 'High Income'}` - } - html += ` -
          ${d3.format('.2f')(d[curSlide.yKey]).slice(0, 4)} ${curSlide.index ? 'Predicted' : 'High School'} GPA
          -
          ${d3.format('.2f')(d.collegeGPA).slice(0, 4)} College GPA
          ` - - ttSel.html(html) - }) - - -var innerCircleSel = circleSel.append('circle') - .at({ - r: 5, - fill: '#eee', - stroke: '#aaa' - }) - -// var textSel = circleSel.append('text').text(d => d.isMale ? 'M' : 'F') -// .at({textAnchor: 'middle', dy: '.33em', fontSize: 8, fill: '#eee'}) -// var textSel2 = circleSel.append('text').text(d => d.isLowIncome ? 'L' : 'H') -// .at({textAnchor: 'middle', dy: '.33em', fontSize: 8, opacity: 0}) - - -c.svg.select('.y').selectAll('line').filter(d => d == 4) - .remove() -c.svg.select('.y').selectAll('text').filter(d => d == 4) - .select(function() { - return this.parentNode.insertBefore(this.cloneNode(1), this.nextSibling); - }) - .text('Actual College GPA') - .at({x: c.width/2, y: c.height + 35, textAnchor: 'middle', fontWeight: 800}) - -var yLabelSel = divSel.st({pointerEvents: 'none'}).append('div.axis') - .html('High School GPA') - .translate([0, -9]) - .st({textAlign: 'left', maxWidth: 260}) - -// c.svg.append('text').text('Actual College GPA').st({fontWeight: 800}) - -var longLabel = 'high school GPA, essay, clubs, zip code, teacher recommendations, sports, AP scores, demonstrated interest, gender, SAT scores, interviews, portfolio, race, work experience' - -var slides = [ - { - yKey: 'hsGPA', - isLineVisible: 0, - yLabel: 'High School GPA', - circleFill: 'grey', - circleFillDelay: d => 0, - }, - - { - yKey: 'hsGPA', - isLineVisible: true, - yLabel: 'High School GPA' - }, - - { - yKey: 'hsGPAadjusted', - yLabel: 'high school GPA' - }, - - { - yKey: 'compGPA0', - yLabel: 'high school GPA, essay, clubs, zip code'.replace('essay', 'essay') + '' - }, - - { - yKey: 'compGPA1', - yLabel: longLabel.replace('teacher', 'teacher') + '', - circleFill: 'grey', - circleFillDelay: d => 0, - textFill: '#eee', - }, - - { - yKey: 'compGPA1', - yLabel: longLabel, - circleFill: 'gender', - circleFillDelay: (d, i) => i*20 + (d.isMale ? 0 : 2000), - textFill: '#000', - }, - - { - name: 'proxyHighlight', - yKey: 'compGPA2', - yLabel: longLabel, - circleFill: 'gender', - circleFillDelay: d => 0, - textFill: '#000', - }, - - { - textFill: '#eee', - yLabel: 'Alumni interview', - yKey: 'inteviewGPAbias', - circleFill: 'grey', - text2Opacity: 0, - }, - - { - textFill: '#eee', - yLabel: 'Alumni interview', - yKey: 'inteviewGPAbias', - circleFill: 'income', - circleFillDelay: (d, i) => i*20 + (!d.isLowIncome ? 2000 : 0), - text2Opacity: 1, - }, - - { - textFill: '#eee', - yLabel: 'Alumni interview, household income'.replace('household', 'household') + '', - yKey: 'inteviewGPA', - text2Opacity: 1, - }, -] - -slides.forEach(d => { - if (d.name == 'proxyHighlight'){ - var proxies = 'clubs, interviews, portfolio, sports'.split(', ') - d.yLabel = d.yLabel - .split(', ') - .map(d => { - if (d == 'gender') return `gender` - if (!proxies.includes(d)) return d - - return `${d}` - }) - .join(', ') - } - - - if (d.yLabel[0] != '<') d.yLabel = 'Predicted College GPA using ' + d.yLabel.replace('School', 'school') -}) - -var keys = [] -slides.forEach(d => keys = keys.concat(d3.keys(d))) -_.uniq(keys).forEach(str => { - var prev = null - slides.forEach(d => { - if (typeof(d[str]) === 'undefined'){ - d[str] = prev - } - prev = d[str] - }) -}) - -slides.forEach((d, i) => { - d.circleFillFn = { - grey: d => '#eee', - gender: d => d.isMale ? colors.m : colors.f, - income: d => d.isLowIncome ? 
colors.l : colors.h, - }[d.circleFill] - - d.index = i -}) - - - - -var gs = d3.graphScroll() - .container(d3.select('.container-1')) - .graph(d3.selectAll('container-1 #graph')) - .eventId('uniqueId1') - .sections(d3.selectAll('.container-1 #sections > div')) - .offset(innerWidth < 900 ? 300 : 520) - .on('active', updateSlide) - - -var prevSlide = -1 -function updateSlide(i){ - var slide = slides[i] - if (!slide) return - curSlide = slide - var {yKey} = slide - - lineSel.transition('yKey').duration(500) - .at({ - d: d => [ - 'M 5 0', - 'C 0 0', - 0, c.y(d['collegeGPA']) - c.y(d[yKey]), - 0, c.y(d['collegeGPA']) - c.y(d[yKey]), - 'S 0 0 -5.5 0' - ].join(' ') - }) - .translate(d => [c.x(d.collegeGPA), c.y(d[yKey])]) - - - circleSel.transition('yKey').duration(500) - .translate(d => [c.x(d.collegeGPA), c.y(d[yKey])]) - - innerCircleSel.transition('colorFill').duration(30) - .delay(slide.circleFillDelay) - .at({ - fill: slide.circleFillFn, - stroke: d => d3.color(slide.circleFillFn(d)).darker(1.5) - }) - - axis2Sel.transition() - .st({opacity: i == 5 ? 1 : 0}) - - lineSel.transition('opacity').duration(500) - .st({ - opacity: slide.isLineVisible ? 1 : 0 - }) - - if (slide.yLabel) yLabelSel.html(slide.yLabel) - - - annotationSel.transition() - .st({opacity: d => i == d.slide ? 1 : 0}) - - - - prevSlide = i -} - -slide = slides[0] - - - - -d3.selectAll('.circle').each(function(){ - var d = d3.select(this).attr('class').split(' ')[0] - - d3.select(this) - .st({ - backgroundColor: d3.color(colors[d]), - borderColor: d3.color(colors[d]).darker(1.5), - }) - - -}) - - - - -function lerp(a, b, t){ return a + t*(b - a) } - - - -c.svg.selectAll('g.annotations').raise() - - - -d3.selectAll('#sections img').attr('aria-hidden', true) - - - - - - - - diff --git a/spaces/merve/fill-in-the-blank/source/base-rate/script.js b/spaces/merve/fill-in-the-blank/source/base-rate/script.js deleted file mode 100644 index efc40861466afc2bb19cee8d3ef6cd5a98d80ddc..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/source/base-rate/script.js +++ /dev/null @@ -1,317 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - - - - - -console.clear() -var ttSel = d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - -window.renderFns = [] - -window.m = (function(){ - var rv = {b: .7, tpr: .8, fnr: .5, update, str: 'kids', titleStr: 'Children',} - - function update(obj={}){ - Object.assign(rv, obj) - window.renderFns.forEach(d => d()) - } - - return rv -})() - -window.f = (function(){ - var rv = {b: .3, tpr: .8, fnr: .5, update, str: 'adults', titleStr: 'Adults'} - - function update(obj={}){ - window.renderFns.forEach(d => d()) - } - - return rv -})() - - -var wLarge = d3.clamp(0, innerWidth/2 - 30, 300) - -d3.select('#big-matrix').html('') - .appendMany('div.big-container', [{w: wLarge, s: f, isText: 1}, {w: wLarge, s: m, isText: 1}]) - .each(drawMatrix) - - -addPattern(10, `pattern-${wLarge}-`) -addPattern(5, 'pattern-50-') - -function addPattern(s, str){ - var cColors = [colors.sick, colors.sick, colors.well, colors.well, lcolors.sick, lcolors.sick, lcolors.well, lcolors.well] - var rColors = [lcolors.sick, lcolors.well, lcolors.sick, lcolors.well, llcolors.sick, llcolors.well, llcolors.sick, llcolors.well] - - d3.select('#big-matrix') - .append('svg') - .st({height: 0, position: 'absolute'}) - .append('defs').appendMany('pattern', d3.range(8)) - .at({ id: i => str + i, width: s, height: s}) - .attr('patternUnits', 'userSpaceOnUse') - .append('rect') - .at({width: s, height: s, fill: i => rColors[i]}) - .parent().append('circle') - .at({r: s == 10 ? 2.5 : 1.5, cx: s/2, cy: s/2, fill: i => cColors[i]}) -} - - -var scale = d3.clamp(0, ((innerWidth - 50) / 3)/280, 1) -var isScaled = scale != 1 - -d3.select('#metrics').html('').st({height: 350*scale + 30}) - .appendMany('div', [0, 1, 2]) - .st({width: 280*scale, display: 'inline-block'}) - .append('div') - .st({transform: `scale(${scale})`, transformOrigin: '0% 0%'}) - .append('div.metrics-container').st({width: 280}) - .each(drawMetric) - -d3.selectAll('rect.drag') - .on('mouseover.style', d => d3.selectAll('rect.' + d).st({strokeWidth: 3, stroke: '#000'})) - .on('mouseout.style', d => d3.selectAll('rect.' + d).st({strokeWidth: 0})) - -function drawMetric(i){ - var sel = d3.select(this) - - var text = [ - // 'Percentage of sick people
          who test positive', - 'Percentage of sick people
          who test positive', - 'Percentage of positive tests
          who are actually sick', - 'Percentage of well people
          who test negative', - ][i] - - var percentFn = [ - s => s.tpr, - s => s.b*s.tpr/(s.b*s.tpr + (1 - s.b)*(s.fnr)), - s => 1 - s.fnr, - ][i] - - var colors = [ - ['#f0f', '#fcf', '#fff', '#fff'], - ['#f0f', '#fff', '#fcf', '#fff'], - ['#fff', '#fff', '#fcf', '#f0f'], - ][i] - - sel.append('h3').st({marginBottom: 20, fontSize: isScaled ? 30 : 20}).html(isScaled ? text.replace('
          ', '') : text) - - var h = 200 - var width = 100 - - var fDiv = sel.append('div').st({position: 'relative', top: -h + 7}) - .datum({w: 50, s: f, isText: 0, colors}).each(drawMatrix) - - var svg = sel.append('svg') - .at({width, height: h}) - .st({fontSize: 14, fontFamily: 'monospace'}) - - svg.append('path').at({stroke: '#ccc', d: `M ${width/2 + .5} 0 V ${h}`}) - - var errorSel = svg.append('path') - .translate(width/2 + .5, 0) - .at({stroke: 'orange', strokeWidth: 3}) - - var fSel = svg.append('g') - var mSel = svg.append('g') - - mSel.append('circle').at({r: 4, cx: width/2 + .5, fill: 'none', stroke: '#000'}) - fSel.append('circle').at({r: 4, cx: width/2 + .5, fill: 'none', stroke: '#000'}) - - var fTextSel = fSel.append('text').text('23%') - .at({dy: '.33em', textAnchor: 'middle', x: width/4 - 3, fontSize: isScaled ? 20 : 16}) - var mTextSel = mSel.append('text').text('23%') - .at({dy: '.33em', textAnchor: 'middle', x: width/4*3 + 5, fontSize: isScaled ? 20 : 16}) - - fSel.append('text').text('Adults').st({fontSize: isScaled ? 18 : 12}) - .at({textAnchor: 'middle', x: -23, y: -30}) - mSel.append('text').text('Children').st({fontSize: isScaled ? 18 : 12}) - .at({textAnchor: 'middle', x: 124, y: -30}) - - var mDiv = sel.append('div').st({position: 'relative', top: -h + 7}) - .datum({w: 50, s: m, isText: 0, colors}).each(drawMatrix) - - - renderFns.push(() => { - var fPercent = percentFn(f) - fSel.translate(h - h*fPercent, 1) - fTextSel.text(d3.format('.0%')(fPercent)) - - var mPercent = percentFn(m) - mSel.translate(h - h*mPercent, 1) - mTextSel.text(d3.format('.0%')(mPercent)) - - fDiv.translate(h - h*fPercent, 1) - mDiv.translate(h - h*mPercent, 1) - - errorSel.at({d: 'M 0 ' + (h - h*fPercent) + ' V ' + (h - h*mPercent) }) - }) -} - -function drawMatrix({s, w, isText, colors}){ - var svg = d3.select(this).append('svg') - .at({width: w, height: w}) - - - svg.append('rect').at({width: w + 1, height: w + 1}) - - if (!colors) colors = ['#000', '#000', '#000', '#000'] - - var rects = [ - {n: 'tp', x: 0, y: 0, width: _ => s.b*w, height: _ => s.tpr*w}, - {n: 'fn', x: 0, y: _ => 1 + s.tpr*w, width: _ => s.b*w, height: _ => w - s.tpr*w}, - {n: 'fp', x: _ => 1 + s.b*w, y: 0, width: _ => w - s.b*w, height: _ => s.fnr*w}, - {n: 'tn', x: _ => 1 + s.b*w, y: _ => 1 + s.fnr*w, width: _ => w - s.b*w, height: _ => w - s.fnr*w}, - ] - rects.forEach((d, i) => d.i = i) - - var rectSel = svg.appendMany('rect', rects) - .at({fill: d => `url(#pattern-${w}-${d.i}`}) - // .at({opacity: d => colors[d.i] == '#fff' ? .5 : 1}) - // .at({fill: d => `url(#pattern-${w}-${d.i + (colors[d.i] == '#ccc' ? 4 : 0)})`}) - // .at({fill: d => colors[d.i] == '#ccc' ? '#000' : `url(#pattern-${w}-${d.i + (colors[d.i] == '#ccc' ? 
4 : 0)})`}) - .each(function(d){ d.sel = d3.select(this) }) - rectSel.filter(d => colors[d.i] == '#fff').at({fill: '#eee'}) - - var bh = .5 - svg.append('rect.tpr').at({height: bh}).translate(-bh/2, 1) - .datum('tpr') - - svg.append('rect.fnr').at({height: bh}).translate(-bh/2, 1) - .datum('fnr') - - svg.append('rect.b').at({width: bh, height: w}).translate(-bh/2, 0) - .datum('b') - - var bh = 20 - svg.append('rect.drag.tpr').at({height: bh}).translate(-bh/2, 1) - .call(makeDrag('tpr', 1)).datum('tpr').call(d3.attachTooltip).on('mouseover', ttFormat) - - svg.append('rect.drag.fnr').at({height: bh}).translate(-bh/2, 1) - .call(makeDrag('fnr', 1)).datum('fnr').call(d3.attachTooltip).on('mouseover', ttFormat) - - svg.append('rect.drag.b').at({width: bh, height: w}).translate(-bh/2, 0) - .call(makeDrag('b', 0)).datum('b').call(d3.attachTooltip).on('mouseover', ttFormat) - - - var tprRect = svg.selectAll('rect.tpr') - var fnrRect = svg.selectAll('rect.fnr') - var bRect = svg.selectAll('rect.b') - - function ttFormat(str){ - var html = '' - if (str == 'tpr') html = `${d3.format('.0%')(s.tpr)} of sick ${s.titleStr.toLowerCase()} test positive` - if (str == 'fnr') html = `${d3.format('.0%')(s.fnr)} of well ${s.titleStr.toLowerCase()} test negative` - if (str == 'b') html = `${d3.format('.0%')(s.b)} of ${s.titleStr.toLowerCase()} are sick` - ttSel.html(html) - } - - function makeDrag(str, index){ - - return d3.drag() - .on('drag', function(){ - var percent = d3.mouse(this)[index]/w - s[str] = d3.clamp(.15, percent, .85) - - window.basetimer.stop() - s.update() - - ttMove() - ttFormat(str) - }) - .on('start', _ => svg.classed('dragging', 1)) - .on('end', _ => svg.classed('dragging', 0)) - } - - renderFns.push(() => { - rectSel.each(d => d.sel.at(d)) - - tprRect.at({width: w*s.b, y: w*s.tpr}) - fnrRect.at({x: w*s.b, width: w - w*s.b, y: w*s.fnr}) - bRect.at({x: w*s.b}) - - // s => s.tpr, - // s => s.b*s.tpr/(s.b*s.tpr + (1 - s.b)*(s.fnr)), - // s => 1 - s.fnr, - if (!isText) return - }) - - - if (!isText) return - - svg.append('text').text(s.titleStr).at({textAnchor: 'middle', x: w/2, y: -8, fontSize: 20}) - - if (innerWidth < 800) return - // if (true) - - svg.appendMany('text', d3.range(4)).each(function(i){ - var isSick = i < 2 - var isPos = i % 2 - - var pad = 5 - d3.select(this) - .translate([isSick ? pad : w - pad, isPos ? 13 : w - 23]) - .at({ - textAnchor: isSick ? 'start' : 'end', - fill: '#000', - fontSize: 12, - fontFamily: 'monospace', - pointerEvents: 'none', - }) - .tspans([ - ' test : ' + (isPos ? 'sick' : 'well'), - 'truth: ' + (isSick ? 'sick' : 'well')]) - }) -} - - -if (window.basetimer) window.basetimer.stop() -window.basetimer = d3.timer(t => { - - var val = t/1000 % (Math.PI*4) - - if (val < Math.PI*2){ - m.b = (Math.sin(val + Math.PI/2))/4 + .4 - } else if (Math.PI*3 < val && val < Math.PI*5 || true){ - f.tpr = (Math.sin(val + Math.PI/2))/4 + .4 - } - m.update() -}) - - - - - -m.update() - - - -function ttMove(d){ - if (!ttSel.size()) return; - - var e = d3.event.sourceEvent, - x = e.clientX, - y = e.clientY, - bb = ttSel.node().getBoundingClientRect(), - left = d3.clamp(20, (x-bb.width/2), window.innerWidth - bb.width - 20), - top = innerHeight > y + 20 + bb.height ? 
y + 20 : y - bb.height - 20; - - ttSel - .style('left', left +'px') - .style('top', top + 'px'); -} - diff --git a/spaces/merve/hidden-bias/server-side/fill-in-the-blank/gender-over-time-colab/watch-files.js b/spaces/merve/hidden-bias/server-side/fill-in-the-blank/gender-over-time-colab/watch-files.js deleted file mode 100644 index c243ec0c0726b96afe9727d6648fdbc18b4e8ad8..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/server-side/fill-in-the-blank/gender-over-time-colab/watch-files.js +++ /dev/null @@ -1,38 +0,0 @@ -function watchFile(path, type){ - var lastStr = '' - - console.log(path) - function check(){ - d3.text(path + '?' + Math.random(), (err, nextStr) => { - if (err){ - console.log(err) - return check() - } - - if (nextStr == lastStr) return - lastStr = nextStr - - if (path.includes('.js')){ - console.clear() - console.log('js', new Date()) - - Function(nextStr.replace('\n', ';').replace('\n', ';'))() - } - - if (path.includes('.css')){ - console.log('css', new Date()) - - Array.from(document.querySelectorAll('link')) - .filter(d => d.href.includes(path)) - .forEach(d => d.href = d.href.split('?')[0] + '?' + Math.random()) - } - }) - - setTimeout(check, window.timeoutMS || 9999999999) - } - check() -} - - -watchFile('https://roadtolarissa.com/colab/gender-over-time-colab/style.css', 'js') -watchFile('https://roadtolarissa.com/colab/gender-over-time-colab/script.js', 'js') diff --git a/spaces/merve/measuring-fairness/source/hidden-bias/script.js b/spaces/merve/measuring-fairness/source/hidden-bias/script.js deleted file mode 100644 index 526901a0178a3ef069380410dd33fdc0334f2bae..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/hidden-bias/script.js +++ /dev/null @@ -1,467 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - - -var ttSel = d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - -var colors = { - m: '#7DDAD3', - f: '#9B86EF', - h: '#F0BD80', - l: '#FF777B', - grey: '#ccc', -} - - -var totalWidth = width = d3.select('#graph').node().offsetWidth -var r = 40 - -var sel = d3.select('#graph').html('') - .append('div') - -var extraWidth = d3.clamp(500, innerHeight - 150, innerWidth - 500) -var scale = extraWidth/500 -scale = 1 -sel.st({transform: `scale(${scale})`, transformOrigin: '0% 0%'}) - -var c = d3.conventions({ - sel, - totalWidth, - totalHeight: totalWidth, - margin: {left: 25, right: 7}, - layers: 'sd', -}) -var divSel = c.layers[1] - -c.x.domain([1, 4]).clamp(true).interpolate(d3.interpolateRound) -c.y.domain([1, 4]).clamp(true).interpolate(d3.interpolateRound) - -c.xAxis.ticks(3).tickFormat(d3.format('.1f')) -c.yAxis.ticks(3).tickFormat(d3.format('.1f')) -d3.drawAxis(c) - -var axis2Sel= c.svg.append('g.axis').append('line') - .translate(Math.round(c.y(2)) + .5, 1) - .at({x2: c.width, stroke: '#000', opacity: 0}) - -var meanGPADiff = .6 - -var seed = new Math.seedrandom('hii') -var students = d3.range(150).map((d, index) => { - var collegeGPA = d3.randomUniform.source(seed)(1, 4)() - - // if (index == 93) collegeGPA = 2.05 - // if (index == 87) collegeGPA = 2.15 - // if (index == 32) collegeGPA = 2.25 - if (index == 131) collegeGPA = 3.9 - - // var hsGPA = collegeGPA*d3.randomNormal(1, .4)() - var hsGPA = collegeGPA + d3.randomNormal.source(seed)(meanGPADiff, .8)() - var hsGPAadjusted = hsGPA - meanGPADiff - - var rand = d3.randomUniform.source(seed)(0, 1) - - var isMale = rand() < .5 - var name = names[isMale ? 'm' : 'f'][Math.floor(d/2)] - var lastName = names.last[d] - var maleOffset = rand()*(isMale ? 1 : -1)*.6 - - // if (index == 47) name = 'Mia' - // if (index == 82) name = 'Mason' - - - var compGPA0 = lerp(hsGPAadjusted, collegeGPA, rand()*.7) + maleOffset - var compGPA1 = lerp(compGPA0, collegeGPA + maleOffset, rand()*1.1) - var compGPA2 = compGPA1 + rand()/4 - 1/4/2 - // var compGPA0 = collegeGPA + d3.randomNormal.source(seed)(0, .5)() - // var compGPA1 = collegeGPA + d3.randomNormal.source(seed)(0, .3)() - - if (index == 69){ - compGPA1 = 2.0 - } - if (index == 37){ - compGPA1 = 2.0 - } - - - var isLowIncome = rand() < .5 - - var inteviewGPA = collegeGPA + d3.randomNormal.source(seed)(0, .15)() - var inteviewGPAbias = inteviewGPA + rand()*(isLowIncome ? -1 : 1)*.5 - - // if (index == 115) name = 'Mason' - // if (index == 32) name = 'Mia' - - if (name == 'Camila') name = 'Mia' - - - return {name, index, lastName, collegeGPA, hsGPA, hsGPAadjusted, compGPA0, compGPA1, compGPA2, isMale, isLowIncome, inteviewGPA, inteviewGPAbias} -}) - -students = _.sortBy(students, d => d.collegeGPA) - -students = students.filter(d => { - return d3.entries(d).every(({key, value}) => { - if (!key.includes('GPA')) return true - - return 1 < value && value < 4.0 - }) -}) - - -c.svg.append('path') - .at({ - d: ['M', 0, c.height, 'L', c.width, 0].join(' '), - stroke: '#ccc', - strokeWidth: 2, - strokeDasharray: '4 2' - }) - -!(function(){ - // return window.annotationSel = d3.select(null) - var isDrag = 0 - if (!isDrag) annotations.forEach(d => d.text = d.html ? 
'' : d.text) - if (isDrag){ - d3.select('#sections').st({pointerEvents: 'none'}) - } - - // copy('window.annotations = ' + JSON.stringify(annotations, null, 2)) - var swoopy = d3.swoopyDrag() - .x(d => c.x(d.x)) - .y(d => c.y(d.y)) - .draggable(isDrag) - .annotations(annotations) - .on('drag', d => { - - }) - - - var htmlAnnoSel = divSel.appendMany('div.annotation', annotations.filter(d => d.html)) - .translate(d => [c.x(d.x), c.y(d.y)]).st({position: 'absolute', opacity: 0}) - .append('div') - .translate(d => d.textOffset) - .html(d => d.html) - .st({width: 150}) - - - - var swoopySel = c.svg.append('g.annotations').call(swoopy) - - c.svg.append('marker') - .attr('id', 'arrow') - .attr('viewBox', '-10 -10 20 20') - .attr('markerWidth', 20) - .attr('markerHeight', 20) - .attr('orient', 'auto') - .append('path') - .attr('d', 'M-6.75,-6.75 L 0,0 L -6.75,6.75') - - swoopySel.selectAll('path') - .attr('marker-end', 'url(#arrow)') - .st({'opacity': d => d.path == 'M 0 0' ? 0 : 1}) - window.annotationSel = swoopySel.selectAll('g') - .st({fontSize: 12, opacity: d => d.slide == 0 ? 1 : 0}) - - window.annotationSel = d3.selectAll('g.annotations g, div.annotation') - - swoopySel.selectAll('text') - .each(function(d){ - d3.select(this) - .text('') //clear existing text - .tspans(d3.wordwrap(d.text, d.width || 20), 13) //wrap after 20 char - }) - })() - - - -students = _.sortBy(students, d => d.collegeGPA) -var lineSel = c.svg.appendMany('path', students) - .translate(d => [c.x(d.hsGPA), c.y(d.collegeGPA)]) - .at({ - // fill: d => d.hsGPA > d.collegeGPA ? 'blue' : 'orange', - fill: '#eee', - stroke: '#aaa', - strokeWidth: .5, - opacity: 0, - // strokeWidth: 1/scale, - }) - - -var circleSel = c.svg.appendMany('g', students) - .translate(d => [c.x(d.collegeGPA), c.y(d.hsGPA)]) - .call(d3.attachTooltip) - .on('mouseover', d => { - var html = '' - html += `
          ${d.name} ${d.lastName}
          ` - - if (curSlide.circleFill == 'gender'){ - html += `${d.isMale ? 'Male' : 'Female'}` - } - - if (curSlide.circleFill == 'income'){ - html += `${d.isLowIncome ? 'Low Income' : 'High Income'}` - } - html += ` -
          ${d3.format('.2f')(d[curSlide.yKey]).slice(0, 4)} ${curSlide.index ? 'Predicted' : 'High School'} GPA
          -
          ${d3.format('.2f')(d.collegeGPA).slice(0, 4)} College GPA
          ` - - ttSel.html(html) - }) - - -var innerCircleSel = circleSel.append('circle') - .at({ - r: 5, - fill: '#eee', - stroke: '#aaa' - }) - -// var textSel = circleSel.append('text').text(d => d.isMale ? 'M' : 'F') -// .at({textAnchor: 'middle', dy: '.33em', fontSize: 8, fill: '#eee'}) -// var textSel2 = circleSel.append('text').text(d => d.isLowIncome ? 'L' : 'H') -// .at({textAnchor: 'middle', dy: '.33em', fontSize: 8, opacity: 0}) - - -c.svg.select('.y').selectAll('line').filter(d => d == 4) - .remove() -c.svg.select('.y').selectAll('text').filter(d => d == 4) - .select(function() { - return this.parentNode.insertBefore(this.cloneNode(1), this.nextSibling); - }) - .text('Actual College GPA') - .at({x: c.width/2, y: c.height + 35, textAnchor: 'middle', fontWeight: 800}) - -var yLabelSel = divSel.st({pointerEvents: 'none'}).append('div.axis') - .html('High School GPA') - .translate([0, -9]) - .st({textAlign: 'left', maxWidth: 260}) - -// c.svg.append('text').text('Actual College GPA').st({fontWeight: 800}) - -var longLabel = 'high school GPA, essay, clubs, zip code, teacher recommendations, sports, AP scores, demonstrated interest, gender, SAT scores, interviews, portfolio, race, work experience' - -var slides = [ - { - yKey: 'hsGPA', - isLineVisible: 0, - yLabel: 'High School GPA', - circleFill: 'grey', - circleFillDelay: d => 0, - }, - - { - yKey: 'hsGPA', - isLineVisible: true, - yLabel: 'High School GPA' - }, - - { - yKey: 'hsGPAadjusted', - yLabel: 'high school GPA' - }, - - { - yKey: 'compGPA0', - yLabel: 'high school GPA, essay, clubs, zip code'.replace('essay', 'essay') + '' - }, - - { - yKey: 'compGPA1', - yLabel: longLabel.replace('teacher', 'teacher') + '', - circleFill: 'grey', - circleFillDelay: d => 0, - textFill: '#eee', - }, - - { - yKey: 'compGPA1', - yLabel: longLabel, - circleFill: 'gender', - circleFillDelay: (d, i) => i*20 + (d.isMale ? 0 : 2000), - textFill: '#000', - }, - - { - name: 'proxyHighlight', - yKey: 'compGPA2', - yLabel: longLabel, - circleFill: 'gender', - circleFillDelay: d => 0, - textFill: '#000', - }, - - { - textFill: '#eee', - yLabel: 'Alumni interview', - yKey: 'inteviewGPAbias', - circleFill: 'grey', - text2Opacity: 0, - }, - - { - textFill: '#eee', - yLabel: 'Alumni interview', - yKey: 'inteviewGPAbias', - circleFill: 'income', - circleFillDelay: (d, i) => i*20 + (!d.isLowIncome ? 2000 : 0), - text2Opacity: 1, - }, - - { - textFill: '#eee', - yLabel: 'Alumni interview, household income'.replace('household', 'household') + '', - yKey: 'inteviewGPA', - text2Opacity: 1, - }, -] - -slides.forEach(d => { - if (d.name == 'proxyHighlight'){ - var proxies = 'clubs, interviews, portfolio, sports'.split(', ') - d.yLabel = d.yLabel - .split(', ') - .map(d => { - if (d == 'gender') return `gender` - if (!proxies.includes(d)) return d - - return `${d}` - }) - .join(', ') - } - - - if (d.yLabel[0] != '<') d.yLabel = 'Predicted College GPA using ' + d.yLabel.replace('School', 'school') -}) - -var keys = [] -slides.forEach(d => keys = keys.concat(d3.keys(d))) -_.uniq(keys).forEach(str => { - var prev = null - slides.forEach(d => { - if (typeof(d[str]) === 'undefined'){ - d[str] = prev - } - prev = d[str] - }) -}) - -slides.forEach((d, i) => { - d.circleFillFn = { - grey: d => '#eee', - gender: d => d.isMale ? colors.m : colors.f, - income: d => d.isLowIncome ? 
colors.l : colors.h, - }[d.circleFill] - - d.index = i -}) - - - - -var gs = d3.graphScroll() - .container(d3.select('.container-1')) - .graph(d3.selectAll('container-1 #graph')) - .eventId('uniqueId1') - .sections(d3.selectAll('.container-1 #sections > div')) - .offset(innerWidth < 900 ? 300 : 520) - .on('active', updateSlide) - - -var prevSlide = -1 -function updateSlide(i){ - var slide = slides[i] - if (!slide) return - curSlide = slide - var {yKey} = slide - - lineSel.transition('yKey').duration(500) - .at({ - d: d => [ - 'M 5 0', - 'C 0 0', - 0, c.y(d['collegeGPA']) - c.y(d[yKey]), - 0, c.y(d['collegeGPA']) - c.y(d[yKey]), - 'S 0 0 -5.5 0' - ].join(' ') - }) - .translate(d => [c.x(d.collegeGPA), c.y(d[yKey])]) - - - circleSel.transition('yKey').duration(500) - .translate(d => [c.x(d.collegeGPA), c.y(d[yKey])]) - - innerCircleSel.transition('colorFill').duration(30) - .delay(slide.circleFillDelay) - .at({ - fill: slide.circleFillFn, - stroke: d => d3.color(slide.circleFillFn(d)).darker(1.5) - }) - - axis2Sel.transition() - .st({opacity: i == 5 ? 1 : 0}) - - lineSel.transition('opacity').duration(500) - .st({ - opacity: slide.isLineVisible ? 1 : 0 - }) - - if (slide.yLabel) yLabelSel.html(slide.yLabel) - - - annotationSel.transition() - .st({opacity: d => i == d.slide ? 1 : 0}) - - - - prevSlide = i -} - -slide = slides[0] - - - - -d3.selectAll('.circle').each(function(){ - var d = d3.select(this).attr('class').split(' ')[0] - - d3.select(this) - .st({ - backgroundColor: d3.color(colors[d]), - borderColor: d3.color(colors[d]).darker(1.5), - }) - - -}) - - - - -function lerp(a, b, t){ return a + t*(b - a) } - - - -c.svg.selectAll('g.annotations').raise() - - - -d3.selectAll('#sections img').attr('aria-hidden', true) - - - - - - - - diff --git a/spaces/miyaaa666/bingo/README.md b/spaces/miyaaa666/bingo/README.md deleted file mode 100644 index 90fab5f716b39d7cb21063693c1f53dd3f9ad781..0000000000000000000000000000000000000000 --- a/spaces/miyaaa666/bingo/README.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: bingo -emoji: 📉 -colorFrom: red -colorTo: red -sdk: docker -pinned: true -license: mit -duplicated_from: hf4all/bingo ---- - -
          - -# Bingo - -Bingo, a New Bing that lets you breathe freely. - -A faithful reproduction of the main features of the New Bing web UI, usable from mainland China, compatible with most Microsoft Bing AI features, and easy to self-host. - -![GitHub stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![GitHub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -
          - -## Demo site - -https://bing.github1s.tk - - - -[![img](./docs/images/demo.png)](https://bing.github1s.tk) - -## Features - -- Completely rewritten with Next.js, closely reproducing the New Bing web UI; the experience is essentially the same as Bing AI. -- Supports Docker builds for quick and easy deployment and access. -- Cookies can be configured globally and shared. -- Supports continuous voice conversation - -## RoadMap - - - [x] Support wss forwarding - - [x] Support one-click deployment - - [x] Improve the mobile layout - - [x] Support image generation - - [x] Support voice input (with voice commands; currently only desktop Edge and Chrome) - - [x] Support voice output (must be enabled manually) - - [x] Support image input - - [x] Support custom domains - - [ ] Support chat history - - [ ] Add a dark mode - - [ ] Support built-in prompts - - [ ] Support offline access - - [ ] Internationalization - -## One-click deployment -You can also deploy your own New Bing AI to 🤗 HuggingFace with one click. - -### Deploy to Huggingface -1. Click this badge -[![Deploy to HuggingFace](https://img.shields.io/badge/%E7%82%B9%E5%87%BB%E9%83%A8%E7%BD%B2-%F0%9F%A4%97-fff)](https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic); the default configuration can be kept as is. - -2. Once the deployment finishes, open "Settings" > "Space domain", copy the HF domain, and share it with others. - -> Huggingface does not support binding your own domain, but there are two workarounds: -> 1. Use Cloudflare Workers, see [Custom domain with Cloudflare Workers](#custom-domain-with-cloudflare-workers) -> 2. Use GitHub Pages and an iframe, see [How to bind a domain](https://github.com/weaigc/bingo/issues/4) - -### Custom domain with Cloudflare Workers - -> Core code: [worker.js](./cloudflare/worker.js) - -- [Register a Cloudflare account](https://dash.cloudflare.com/sign-up) - -- Add a new site; you need your own domain and its `Name Server` must be delegated to Cloudflare (search online for more details). - -- Open "Workers" from the left-hand menu and click "Create a Worker". - -- Create the Worker service, copy the full contents of [worker.js](./cloudflare/worker.js) into it, adjust it according to the comments, then save and deploy. - -- Configure your custom access domain under Triggers. - -### Deploying to other platforms -
          - -Because New Bing currently blocks these other platforms, you will run into many problems with them; they are no longer recommended, but the instructions are kept here for anyone who needs them. - - -#### Deploy to Netlify -[![Deploy to Netlify Button](https://www.netlify.com/img/deploy/button.svg)](https://app.netlify.com/start/deploy?repository=https://github.com/weaigc/bingo) - -#### Deploy to Vercel -If you are a paying Vercel user, you can use the link below to deploy to Vercel with one click. The free tier has an [API timeout limit](https://vercel.com/docs/concepts/limits/overview) and is not recommended. - -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?demo-title=bingo&demo-description=bingo&demo-url=https%3A%2F%2Fbing.github1s.tk%2F&project-name=bingo&repository-name=bingo&repository-url=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo&from=templates&skippable-integrations=1&env=BING_HEADER&envDescription=%E5%A6%82%E6%9E%9C%E4%B8%8D%E7%9F%A5%E9%81%93%E6%80%8E%E4%B9%88%E9%85%8D%E7%BD%AE%E8%AF%B7%E7%82%B9%E5%8F%B3%E4%BE%A7Learn+More&envLink=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo%2Fblob%2Fmain%2F.env.example) - -#### Deploy to Render - -[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/weaigc/bingo) -
          - -## Requirements - -- Node.js >= 18 -- Bing AI [credentials](#how-to-get-bing_header) - -## Installation and usage - -> Because Microsoft is currently blocking heavily, [deploying to Huggingface](#deploy-to-huggingface) is the recommended option. - -* Start with Node - -```bash -git clone https://github.com/weaigc/bingo.git -npm i # pnpm i is recommended -npm run build -npm run start -``` - -* Start with Docker -```bash -docker pull weaigc/bingo -docker run --rm -it -p 7860:7860 weaigc/bingo -# or -docker run --rm -it -e BING_HEADER=xxxx -p 7860:7860 weaigc/bingo -``` - -## How to get BING_HEADER -> Setting BING_HEADER means sharing your own account with everyone who uses this service. If you do not need login-free image generation, it is best not to set this variable. - -Open https://www.bing.com and sign in, then visit https://www.bing.com/turing/captcha/challenge and pass the human verification, then - -![BING HEADER](./docs/images/curl.png) - -> The copied content should look like the example below. After confirming the format is correct, open https://effulgent-bubblegum-e2f5df.netlify.app/#dialog=%22settings%22 , paste it in, click "Convert to BING_HEADER and copy", and then paste the result from the clipboard. (You can also verify it on that page first.) - -The following is a format reference. Note that the format saved from the web page starts with `curl`, while the `BING_HEADER` configured on the server side is in `base64` format; the two are not interchangeable. -
          -正常格式/网页端保存的格式(格式仅供参考) - -``` -curl 'https://www.bing.com/turing/captcha/challenge' \ - -H 'authority: www.bing.com' \ - -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \ - -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6' \ - -H 'cache-control: max-age=0' \ - -H 'cookie: MicrosoftApplicationsTelemetryDeviceId=3399c004-fd0e-48ec-bb92-d82a27b2bbd4; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=29EBDDA4E6674329ACCF1A0A423C3E98&dmnchg=1; _UR=QS=0&TQS=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNVQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6Mn0=; _RwBf=ilt=1&ihpd=1&ispd=0&rc=0&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=&clo=0&v=1&l=2023-07-25T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2023-07-25T11:00:31.7111548+00:00&rwred=0&wls=&lka=0&lkt=0&TH=&dci=0; ANON=A=0043C6590EA808ED6E395059FFFFFFFF&E=1c8b&W=1; NAP=V=1.9&E=1c31&C=DnaMSbDN_4efZ_xXqBF3Daorjr53kYqYoaP8YHsupjmiXnysX7a37A&W=1; PPLState=1; KievRPSSecAuth=FABSBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACMGUA7EGVSjGEAQBGHtNsc5sNL7unmJsfPJ2t6imfo4BeUJlAia3IpMTtMUy4PU/C5QAzRI5pODtsIee0+blgllXt/5IiWwGjwmdhivsFM597pRPkjARPfwsPhNLPNbJrCPNPHdje4Is78MnCADXw6/NBq2FL8V2/byw2fH6IuAMD2MvN/VvqpEa9ZxiDjZtENj4HEj0mO2SgzjfyEhVAkjvznJqU2rw/Q2tHmX94NAM2kzlzKF/hWPhCCUmu8IHLvCnHDS6mSptvJDDP/sp3ovtzOXkP1mlM/Xju5ftesUvccVEQGffXORa1dE5hEMbKIiKXz1tDdduSXE19g9/+mRMAjaQhpwhI8XmilCTx1adb1Ll5qK+VjC9GNfEZzcbsGBPVaOl+anG8rEMq+Xnhjo7J+NqTNolavHgcuV8kJsCeJZIged33UA8eOZeFo+wAECMguxMoSqgpGH+sthqynvD/FJD6r/tiU2N3uqVq8NE8V37asrN6T14Z0FGBJOe6ET1+PGApm3s11OY9/xhFEB9T5BEPUGEbvRcLcW2ncFQX0EU+xweiPqo1Q1hNUg/dCtSI+lZ7c2H8XheePZavZ0TJQ8oNCSAuKiTqJmI0fVGpwbXwfaADkEipuawz3fIuMJBNgMU0OtA7Hm59v2fGLIBuvi6YeKS6GgVk3BIPf+P/eKahwozrxQZaFnoHTSqMkvct7xCP4atBROfXKf5Ww0CcFKp+2WX9BIskTOo2jjk6bAyyYJ+ElUB1fgLKNk5m/YSMc9iYCLIBMIGN8F0Yvy3tZ7cvh7Ue5Klo98US/I+nW1G7ZJMHRgUO8h8lpneHqEMegKd8gynO4VF7RpCjJkunDmW0Ta+RkXAP619pg0dqHMFkoOgknN78oBbGTV6fJUKotv+vi61kLhAeXZGWoHGCRXh2wUC6YgfPgKA6ESRNHtFn7E5B3HHpLc5rVMDSNhKZYfdhupV4Ezf6+5DhMcZLZhi0kk+ivDiN1gdHlVtSN55xpvf+c+XZDzR0uhgcvgy0LAbmzgk6y4WbYH+LQsMpzNNj+aC72vMiWovWrKh9jY4MYCmdgxsS/skPtLdp18muiEIRXTbZQGUmhxFpJAIbBIsCscMpzL0BgeujxUwM5wr79Sd9r4xwbgSMwmBlBfUHRVBdNyg8feepeJbCS63nD6eHOuLqMRsPIio3w/ki/EAa92UUEiZeavLsMUD/y/qAvWUdzdP5Y+C/TM+CMGS/kGL4LEdY/28MQeTvU1qv1X21kQt2aiaj3pPVL36hAzxbcLgqcMo9oymDRy87kdCXW/+g4oKLtMh6fm/G6W6Y/B01JlxohyyvueHQIG557uzkEkTJ3FnOVODSKBKpb3WZ65rExfV71zSZa25F3GmpaIG6HiYrX2YYhQAkIE9pKEQBHbnwHuwNDGottZTXZw=; WLS=C=9df3f9d8518fae19&N=wen; WLID=pGY8HgWCu4p5XYCOk2oa0+DBdftkMUfmNIn8XtSjSTKsgv/Il7GUlYs0Jpjf/E12jZMgV7x44Dy3fXOgjjUoJx7Y/ClLrLhsk20THksJJoI=; _EDGE_S=F=1&SID=17CF6EE006426448213C7DB907436588&mkt=zh-CN; MUID=225621093D8A6C27301632413C0E6D08; MUIDB=225621093D8A6C27301632413C0E6D08; SUID=A; SNRHOP=I=&TS=; _U=nGyzKQruEsDwLiu65fZFIG6e12hf2lwTJmroW__k8joUJIKmG3OIjayXKGW9dCVR3sNhF76mEVxyW6yjUGPodOfjtSa3s3J_DxMOrEK1BqXCOBI9bC66spAIASV7prsYFlVAJz73jVNENp_tBubLHJy6EbT0BKRe4AjrYkH-9uMnmCKB8Zmyg; _SS=SID=17CF6EE006426448213C7DB907436588&R=0&RB=0&GB=0&RG=200&RP=0&PC=U531; SRCHS=PC=U531; 
USRLOC=HS=1&ELOC=LAT=22.501529693603516|LON=113.9263687133789|N=%E5%8D%97%E5%B1%B1%E5%8C%BA%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=2|&CLOC=LAT=22.50153029046461|LON=113.92637070632928|A=733.4464586120832|TS=230726151034|SRC=W; SRCHUSR=DOB=20230725&T=1690384908000&POEX=W; ipv6=hit=1690388509974&t=6; SRCHHPGUSR=HV=1690384945&SRCHLANG=zh-Hans&PV=15.0.0&BRW=MW&BRH=MT&CW=410&CH=794&SCW=410&SCH=794&DPR=1.5&UTC=480&DM=0&WTS=63825879627&PRVCW=410&PRVCH=794&PR=1.5; cct=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpny6Y_CVyi_MSyM94VyMWnjdYkkccVtm3czoIAtXUGQA; GC=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpR3Y_D9Ytcks4Ht6XhadXk75dvhzP4YOUS0UmoEyqyxw' \ - -H 'dnt: 1' \ - -H 'sec-ch-ua: "Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"' \ - -H 'sec-ch-ua-arch: "x86"' \ - -H 'sec-ch-ua-bitness: "64"' \ - -H 'sec-ch-ua-full-version: "116.0.1938.29"' \ - -H 'sec-ch-ua-full-version-list: "Chromium";v="116.0.5845.42", "Not)A;Brand";v="24.0.0.0", "Microsoft Edge";v="116.0.1938.29"' \ - -H 'sec-ch-ua-mobile: ?0' \ - -H 'sec-ch-ua-model: ""' \ - -H 'sec-ch-ua-platform: "Windows"' \ - -H 'sec-ch-ua-platform-version: "15.0.0"' \ - -H 'sec-fetch-dest: document' \ - -H 'sec-fetch-mode: navigate' \ - -H 'sec-fetch-site: none' \ - -H 'sec-fetch-user: ?1' \ - -H 'sec-ms-gec: B3F47AD4A283CAB374C0451C46AAFD147C6A4DACAFF6A1C13F34B2C72B024494' \ - -H 'sec-ms-gec-version: 1-116.0.1938.29' \ - -H 'upgrade-insecure-requests: 1' \ - -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.0.0' \ - -H 'x-client-data: eyIxIjoiMiIsIjEwIjoiXCJTMGg3R05HOTF2aDQ1TUZSUnZ5NHN2akRmMWdlaVJKenNxNlA3aU1WbnF3PVwiIiwiMiI6IjEiLCIzIjoiMSIsIjQiOiIyMTU4ODQ5NTM4MjY4OTM5NTA3IiwiNSI6IlwiSm9GUWpPTDk3OS9MbkRRZnlCd2N1M2FsOUN3eTZTQmdaMGNYMXBtOWVMZz1cIiIsIjYiOiJiZXRhIiwiNyI6IjE4MDM4ODYyNjQzNSIsIjkiOiJkZXNrdG9wIn0=' \ - -H 'x-edge-shopping-flag: 1' \ - --compressed -``` -
          - -
          -转成base64之后的格式(BING_HEADER只能使用 base64 之后的格式) - -``` -Y3VybCAnaHR0cHM6Ly93d3cuYmluZy5jb20vdHVyaW5nL2NvbnZlcnNhdGlvbi9jcmVhdGUnIFwgICAtSCAnYXV0aG9yaXR5OiB3d3cuYmluZy5jb20nIFwgICAtSCAnYWNjZXB0OiB0ZXh0L2h0bWwsYXBwbGljYXRpb24veGh0bWwreG1sLGFwcGxpY2F0aW9uL3htbDtxPTAuOSxpbWFnZS93ZWJwLGltYWdlL2FwbmcsKi8qO3E9MC44LGFwcGxpY2F0aW9uL3NpZ25lZC1leGNoYW5nZTt2PWIzO3E9MC43JyBcICAgLUggJ2FjY2VwdC1sYW5ndWFnZTogemgtQ04semg7cT0wLjksZW47cT0wLjgsZW4tR0I7cT0wLjcsZW4tVVM7cT0wLjYnIFwgICAtSCAnY2FjaGUtY29udHJvbDogbWF4LWFnZT0wJyBcICAgLUggJ2Nvb2tpZTogTWljcm9zb2Z0QXBwbGljYXRpb25zVGVsZW1ldHJ5RGV2aWNlSWQ9MzM5OWMwMDQtZmQwZS00OGVjLWJiOTItZDgyYTI3YjJiYmQ0OyBfRURHRV9WPTE7IFNSQ0hEPUFGPU5PRk9STTsgU1JDSFVJRD1WPTImR1VJRD0yOUVCRERBNEU2Njc0MzI5QUNDRjFBMEE0MjNDM0U5OCZkbW5jaGc9MTsgX1VSPVFTPTAmVFFTPTA7IF9IUFZOPUNTPWV5SlFiaUk2ZXlKRGJpSTZNU3dpVTNRaU9qQXNJbEZ6SWpvd0xDSlFjbTlrSWpvaVVDSjlMQ0pUWXlJNmV5SkRiaUk2TVN3aVUzUWlPakFzSWxGeklqb3dMQ0pRY205a0lqb2lTQ0o5TENKUmVpSTZleUpEYmlJNk1Td2lVM1FpT2pBc0lsRnpJam93TENKUWNtOWtJam9pVkNKOUxDSkJjQ0k2ZEhKMVpTd2lUWFYwWlNJNmRISjFaU3dpVEdGa0lqb2lNakF5TXkwd055MHlOVlF3TURvd01Eb3dNRm9pTENKSmIzUmtJam93TENKSGQySWlPakFzSWtSbWRDSTZiblZzYkN3aVRYWnpJam93TENKR2JIUWlPakFzSWtsdGNDSTZNbjA9OyBfUndCZj1pbHQ9MSZpaHBkPTEmaXNwZD0wJnJjPTAmcmI9MCZnYj0wJnJnPTIwMCZwYz0wJm10dT0wJnJiYj0wJmc9MCZjaWQ9JmNsbz0wJnY9MSZsPTIwMjMtMDctMjVUMDc6MDA6MDAuMDAwMDAwMFombGZ0PTAwMDEtMDEtMDFUMDA6MDA6MDAuMDAwMDAwMCZhb2Y9MCZvPTImcD0mYz0mdD0wJnM9MDAwMS0wMS0wMVQwMDowMDowMC4wMDAwMDAwKzAwOjAwJnRzPTIwMjMtMDctMjVUMTE6MDA6MzEuNzExMTU0OCswMDowMCZyd3JlZD0wJndscz0mbGthPTAmbGt0PTAmVEg9JmRjaT0wOyBBTk9OPUE9MDA0M0M2NTkwRUE4MDhFRDZFMzk1MDU5RkZGRkZGRkYmRT0xYzhiJlc9MTsgTkFQPVY9MS45JkU9MWMzMSZDPURuYU1TYkROXzRlZlpfeFhxQkYzRGFvcmpyNTNrWXFZb2FQOFlIc3Vwam1pWG55c1g3YTM3QSZXPTE7IFBQTFN0YXRlPTE7IEtpZXZSUFNTZWNBdXRoPUZBQlNCQlJhVE9KSUx0RnNNa3BMVldTRzZBTjZDL3N2UndObUFBQUVnQUFBQ01HVUE3RUdWU2pHRUFRQkdIdE5zYzVzTkw3dW5tSnNmUEoydDZpbWZvNEJlVUpsQWlhM0lwTVR0TVV5NFBVL0M1UUF6Ukk1cE9EdHNJZWUwK2JsZ2xsWHQvNUlpV3dHandtZGhpdnNGTTU5N3BSUGtqQVJQZndzUGhOTFBOYkpyQ1BOUEhkamU0SXM3OE1uQ0FEWHc2L05CcTJGTDhWMi9ieXcyZkg2SXVBTUQyTXZOL1Z2cXBFYTlaeGlEalp0RU5qNEhFajBtTzJTZ3pqZnlFaFZBa2p2em5KcVUycncvUTJ0SG1YOTROQU0ya3psektGL2hXUGhDQ1VtdThJSEx2Q25IRFM2bVNwdHZKRERQL3NwM292dHpPWGtQMW1sTS9YanU1ZnRlc1V2Y2NWRVFHZmZYT1JhMWRFNWhFTWJLSWlLWHoxdERkZHVTWEUxOWc5LyttUk1BamFRaHB3aEk4WG1pbENUeDFhZGIxTGw1cUsrVmpDOUdOZkVaemNic0dCUFZhT2wrYW5HOHJFTXErWG5oam83SitOcVROb2xhdkhnY3VWOGtKc0NlSlpJZ2VkMzNVQThlT1plRm8rd0FFQ01ndXhNb1NxZ3BHSCtzdGhxeW52RC9GSkQ2ci90aVUyTjN1cVZxOE5FOFYzN2Fzck42VDE0WjBGR0JKT2U2RVQxK1BHQXBtM3MxMU9ZOS94aEZFQjlUNUJFUFVHRWJ2UmNMY1cybmNGUVgwRVUreHdlaVBxbzFRMWhOVWcvZEN0U0krbFo3YzJIOFhoZWVQWmF2WjBUSlE4b05DU0F1S2lUcUptSTBmVkdwd2JYd2ZhQURrRWlwdWF3ejNmSXVNSkJOZ01VME90QTdIbTU5djJmR0xJQnV2aTZZZUtTNkdnVmszQklQZitQL2VLYWh3b3pyeFFaYUZub0hUU3FNa3ZjdDd4Q1A0YXRCUk9mWEtmNVd3MENjRktwKzJXWDlCSXNrVE9vMmpqazZiQXl5WUorRWxVQjFmZ0xLTms1bS9ZU01jOWlZQ0xJQk1JR044RjBZdnkzdFo3Y3ZoN1VlNUtsbzk4VVMvSStuVzFHN1pKTUhSZ1VPOGg4bHBuZUhxRU1lZ0tkOGd5bk80VkY3UnBDakprdW5EbVcwVGErUmtYQVA2MTlwZzBkcUhNRmtvT2drbk43OG9CYkdUVjZmSlVLb3R2K3ZpNjFrTGhBZVhaR1dvSEdDUlhoMndVQzZZZ2ZQZ0tBNkVTUk5IdEZuN0U1QjNISHBMYzVyVk1EU05oS1pZZmRodXBWNEV6ZjYrNURoTWNaTFpoaTBraytpdkRpTjFnZEhsVnRTTjU1eHB2ZitjK1haRHpSMHVoZ2N2Z3kwTEFibXpnazZ5NFdiWUgrTFFzTXB6Tk5qK2FDNzJ2TWlXb3ZXcktoOWpZNE1ZQ21kZ3hzUy9za1B0TGRwMThtdWlFSVJYVGJaUUdVbWh4RnBKQUliQklzQ3NjTXB6TDBCZ2V1anhVd001d3I3OVNkOXI0eHdiZ1NNd21CbEJmVUhSVkJkTnlnOGZlZXBlSmJDUzYzbkQ2ZUhPdUxxTVJzUElpbzN3L2tpL0VBYTkyVVVFaVplYXZMc01VRC95L3FBdldVZHpkUDVZK0MvVE0rQ01HUy9rR0w0TEVkWS8yOE1RZVR2VTFxdjFYMjFrUXQyYWlhajNwUFZMMzZoQXp4YmNMZ3FjTW85b3ltRFJ5
ODdrZENYVy8rZzRvS0x0TWg2Zm0vRzZXNlkvQjAxSmx4b2h5eXZ1ZUhRSUc1NTd1emtFa1RKM0ZuT1ZPRFNLQktwYjNXWjY1ckV4ZlY3MXpTWmEyNUYzR21wYUlHNkhpWXJYMllZaFFBa0lFOXBLRVFCSGJud0h1d05ER290dFpUWFp3PTsgV0xTPUM9OWRmM2Y5ZDg1MThmYWUxOSZOPXdlbjsgV0xJRD1wR1k4SGdXQ3U0cDVYWUNPazJvYTArREJkZnRrTVVmbU5JbjhYdFNqU1RLc2d2L0lsN0dVbFlzMEpwamYvRTEyalpNZ1Y3eDQ0RHkzZlhPZ2pqVW9KeDdZL0NsTHJMaHNrMjBUSGtzSkpvST07IF9FREdFX1M9Rj0xJlNJRD0xN0NGNkVFMDA2NDI2NDQ4MjEzQzdEQjkwNzQzNjU4OCZta3Q9emgtQ047IE1VSUQ9MjI1NjIxMDkzRDhBNkMyNzMwMTYzMjQxM0MwRTZEMDg7IE1VSURCPTIyNTYyMTA5M0Q4QTZDMjczMDE2MzI0MTNDMEU2RDA4OyBTVUlEPUE7IFNOUkhPUD1JPSZUUz07IF9VPW5HeXpLUXJ1RXNEd0xpdTY1ZlpGSUc2ZTEyaGYybHdUSm1yb1dfX2s4am9VSklLbUczT0lqYXlYS0dXOWRDVlIzc05oRjc2bUVWeHlXNnlqVUdQb2RPZmp0U2EzczNKX0R4TU9yRUsxQnFYQ09CSTliQzY2c3BBSUFTVjdwcnNZRmxWQUp6NzNqVk5FTnBfdEJ1YkxISnk2RWJUMEJLUmU0QWpyWWtILTl1TW5tQ0tCOFpteWc7IF9TUz1TSUQ9MTdDRjZFRTAwNjQyNjQ0ODIxM0M3REI5MDc0MzY1ODgmUj0wJlJCPTAmR0I9MCZSRz0yMDAmUlA9MCZQQz1VNTMxOyBTUkNIUz1QQz1VNTMxOyBVU1JMT0M9SFM9MSZFTE9DPUxBVD0yMi41MDE1Mjk2OTM2MDM1MTZ8TE9OPTExMy45MjYzNjg3MTMzNzg5fE49JUU1JThEJTk3JUU1JUIxJUIxJUU1JThDJUJBJUVGJUJDJThDJUU1JUI5JUJGJUU0JUI4JTlDJUU3JTlDJTgxfEVMVD0yfCZDTE9DPUxBVD0yMi41MDE1MzAyOTA0NjQ2MXxMT049MTEzLjkyNjM3MDcwNjMyOTI4fEE9NzMzLjQ0NjQ1ODYxMjA4MzJ8VFM9MjMwNzI2MTUxMDM0fFNSQz1XOyBTUkNIVVNSPURPQj0yMDIzMDcyNSZUPTE2OTAzODQ5MDgwMDAmUE9FWD1XOyBpcHY2PWhpdD0xNjkwMzg4NTA5OTc0JnQ9NjsgU1JDSEhQR1VTUj1IVj0xNjkwMzg0OTQ1JlNSQ0hMQU5HPXpoLUhhbnMmUFY9MTUuMC4wJkJSVz1NVyZCUkg9TVQmQ1c9NDEwJkNIPTc5NCZTQ1c9NDEwJlNDSD03OTQmRFBSPTEuNSZVVEM9NDgwJkRNPTAmV1RTPTYzODI1ODc5NjI3JlBSVkNXPTQxMCZQUlZDSD03OTQmUFI9MS41OyBjY3Q9QWpXSUJZT29WUC1BZnE2Z1d3dHg4MElmNnlIbjZpQnVFVkhBMVhIZEFLcG55NllfQ1Z5aV9NU3lNOTRWeU1XbmpkWWtrY2NWdG0zY3pvSUF0WFVHUUE7IEdDPUFqV0lCWU9vVlAtQWZxNmdXd3R4ODBJZjZ5SG42aUJ1RVZIQTFYSGRBS3BSM1lfRDlZdGNrczRIdDZYaGFkWGs3NWR2aHpQNFlPVVMwVW1vRXlxeXh3JyBcICAgLUggJ2RudDogMScgXCAgIC1IICdzZWMtY2gtdWE6ICJDaHJvbWl1bSI7dj0iMTE2IiwgIk5vdClBO0JyYW5kIjt2PSIyNCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2IicgXCAgIC1IICdzZWMtY2gtdWEtYXJjaDogIng4NiInIFwgICAtSCAnc2VjLWNoLXVhLWJpdG5lc3M6ICI2NCInIFwgICAtSCAnc2VjLWNoLXVhLWZ1bGwtdmVyc2lvbjogIjExNi4wLjE5MzguMjkiJyBcICAgLUggJ3NlYy1jaC11YS1mdWxsLXZlcnNpb24tbGlzdDogIkNocm9taXVtIjt2PSIxMTYuMC41ODQ1LjQyIiwgIk5vdClBO0JyYW5kIjt2PSIyNC4wLjAuMCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2LjAuMTkzOC4yOSInIFwgICAtSCAnc2VjLWNoLXVhLW1vYmlsZTogPzAnIFwgICAtSCAnc2VjLWNoLXVhLW1vZGVsOiAiIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm06ICJXaW5kb3dzIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm0tdmVyc2lvbjogIjE1LjAuMCInIFwgICAtSCAnc2VjLWZldGNoLWRlc3Q6IGRvY3VtZW50JyBcICAgLUggJ3NlYy1mZXRjaC1tb2RlOiBuYXZpZ2F0ZScgXCAgIC1IICdzZWMtZmV0Y2gtc2l0ZTogbm9uZScgXCAgIC1IICdzZWMtZmV0Y2gtdXNlcjogPzEnIFwgICAtSCAnc2VjLW1zLWdlYzogQjNGNDdBRDRBMjgzQ0FCMzc0QzA0NTFDNDZBQUZEMTQ3QzZBNERBQ0FGRjZBMUMxM0YzNEIyQzcyQjAyNDQ5NCcgXCAgIC1IICdzZWMtbXMtZ2VjLXZlcnNpb246IDEtMTE2LjAuMTkzOC4yOScgXCAgIC1IICd1cGdyYWRlLWluc2VjdXJlLXJlcXVlc3RzOiAxJyBcICAgLUggJ3VzZXItYWdlbnQ6IE1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS8xMTYuMC4wLjAgU2FmYXJpLzUzNy4zNiBFZGcvMTE2LjAuMC4wJyBcICAgLUggJ3gtY2xpZW50LWRhdGE6IGV5SXhJam9pTWlJc0lqRXdJam9pWENKVE1HZzNSMDVIT1RGMmFEUTFUVVpTVW5aNU5ITjJha1JtTVdkbGFWSktlbk54TmxBM2FVMVdibkYzUFZ3aUlpd2lNaUk2SWpFaUxDSXpJam9pTVNJc0lqUWlPaUl5TVRVNE9EUTVOVE00TWpZNE9UTTVOVEEzSWl3aU5TSTZJbHdpU205R1VXcFBURGszT1M5TWJrUlJabmxDZDJOMU0yRnNPVU4zZVRaVFFtZGFNR05ZTVhCdE9XVk1aejFjSWlJc0lqWWlPaUppWlhSaElpd2lOeUk2SWpFNE1ETTRPRFl5TmpRek5TSXNJamtpT2lKa1pYTnJkRzl3SW4wPScgXCAgIC1IICd4LWVkZ2Utc2hvcHBpbmctZmxhZzogMScgXCAgIC0tY29tcHJlc3NlZA== -``` -
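
To make the relationship between the two formats above concrete, here is a minimal sketch of the conversion, assuming that `BING_HEADER` is simply the base64 encoding of the full `curl` command copied from the browser (the example value above starts with `Y3VybCAn`, which decodes to `curl '`, so this assumption can be checked directly). The file name `bing-curl.txt` is only a placeholder for wherever you saved the copied command.

```python
import base64
from pathlib import Path

# Read the curl command copied from the browser ("Copy as cURL").
# "bing-curl.txt" is a placeholder path; paste your own command there.
curl_command = Path("bing-curl.txt").read_text(encoding="utf-8").strip()

if not curl_command.startswith("curl "):
    raise ValueError("expected the copied text to start with 'curl '")

# Assumption: BING_HEADER is the base64 encoding of the whole curl command.
bing_header = base64.b64encode(curl_command.encode("utf-8")).decode("ascii")
print(bing_header)  # set this value as the BING_HEADER environment variable or Space secret
```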
          - - -## 鸣谢 - - 感谢 [EdgeGPT](https://github.com/acheong08/EdgeGPT) 提供的代理 API 的方法。 - - 感谢 [Vercel AI](https://github.com/vercel-labs/ai-chatbot) 提供的基础脚手架和 [ChatHub](https://github.com/chathub-dev/chathub) [go-proxy-bingai](https://github.com/adams549659584/go-proxy-bingai) 提供的部分代码。 - - -## 答疑及交流 - - - -## License - -MIT © [LICENSE](https://github.com/weaigc/bingo/blob/main/LICENSE). - - diff --git a/spaces/mmecheri/Rakuten_Streamlit/submultiapp.py b/spaces/mmecheri/Rakuten_Streamlit/submultiapp.py deleted file mode 100644 index 5f6b4b4288488ef846c80c353b473437a837123d..0000000000000000000000000000000000000000 --- a/spaces/mmecheri/Rakuten_Streamlit/submultiapp.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Frameworks for running multiple Streamlit applications as a single app. -""" -import streamlit as st - - - -class SubMultiApp: - - - def __init__(self, bar_header=None, radio_label= None): - self.apps = [] - self.bar_header = bar_header - self.radio_label = radio_label - - - - def add_app(self, title, func): - """Adds a new application. - Parameters - ---------- - func: - the python function to render this app. - title: - title of the app. Appears in the dropdown in the sidebar. - """ - self.apps.append({"title": title, "function": func}) -# @st.cache - def run(self): - app_state = st.experimental_get_query_params() - app_state = { - #k: v[0] if isinstance(v, list) else v for k, v in list(app_state.items()) - } # fetch the first item in each query string as we don't have multiple values for each query string key in this example - - - titles = [a["title"] for a in self.apps] - functions = [a["function"] for a in self.apps] - default_radio = titles.index(app_state["page"]) if "page" in app_state else 0 - - if self.bar_header != None: - st.sidebar.header(self.bar_header) - - if self.radio_label == None: - self.radio_label ='Menu' - - - title = st.sidebar.radio(self.radio_label, titles, index=default_radio, key="radio") - - st.experimental_set_query_params(**st.session_state.to_dict()) - functions[titles.index(title)]() - diff --git a/spaces/mrm8488/PromptSource/utils.py b/spaces/mrm8488/PromptSource/utils.py deleted file mode 100644 index 1ecf3a45bd18d089969c8e6e229a9ef13a63af26..0000000000000000000000000000000000000000 --- a/spaces/mrm8488/PromptSource/utils.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding=utf-8 - -import datasets -import requests - -from promptsource.templates import INCLUDED_USERS - - -def removeHyphen(example): - example_clean = {} - for key in example.keys(): - if "-" in key: - new_key = key.replace("-", "_") - example_clean[new_key] = example[key] - else: - example_clean[key] = example[key] - example = example_clean - return example - - -def renameDatasetColumn(dataset): - col_names = dataset.column_names - for cols in col_names: - if "-" in cols: - dataset = dataset.rename_column(cols, cols.replace("-", "_")) - return dataset - - -# -# Helper functions for datasets library -# - - -def get_dataset_builder(path, conf=None): - "Get a dataset builder from name and conf." - module_path = datasets.load.prepare_module(path, dataset=True) - builder_cls = datasets.load.import_main_class(module_path[0], dataset=True) - if conf: - builder_instance = builder_cls(name=conf, cache_dir=None, hash=module_path[1]) - else: - builder_instance = builder_cls(cache_dir=None, hash=module_path[1]) - return builder_instance - - -def get_dataset(path, conf=None): - "Get a dataset from name and conf." 
- builder_instance = get_dataset_builder(path, conf) - if builder_instance.manual_download_instructions is None and builder_instance.info.size_in_bytes is not None: - builder_instance.download_and_prepare() - return builder_instance.as_dataset() - else: - return datasets.load_dataset(path, conf) - - -def get_dataset_confs(path): - "Get the list of confs for a dataset." - module_path = datasets.load.prepare_module(path, dataset=True) - # Get dataset builder class from the processing script - builder_cls = datasets.load.import_main_class(module_path[0], dataset=True) - # Instantiate the dataset builder - confs = builder_cls.BUILDER_CONFIGS - if confs and len(confs) > 1: - return confs - return [] - - -def render_features(features): - """Recursively render the dataset schema (i.e. the fields).""" - if isinstance(features, dict): - return {k: render_features(v) for k, v in features.items()} - if isinstance(features, datasets.features.ClassLabel): - return features.names - - if isinstance(features, datasets.features.Value): - return features.dtype - - if isinstance(features, datasets.features.Sequence): - return {"[]": render_features(features.feature)} - return features - - -# -# Loads dataset information -# - - -def filter_english_datasets(): - """ - Filter English datasets based on language tags in metadata. - - Also includes the datasets of any users listed in INCLUDED_USERS - """ - english_datasets = [] - - response = requests.get("https://huggingface.co/api/datasets?full=true") - tags = response.json() - - for dataset in tags: - dataset_name = dataset["id"] - - is_community_dataset = "/" in dataset_name - if is_community_dataset: - user = dataset_name.split("/")[0] - if user in INCLUDED_USERS: - english_datasets.append(dataset_name) - continue - - if "card_data" not in dataset: - continue - metadata = dataset["card_data"] - - if "languages" not in metadata: - continue - languages = metadata["languages"] - - if "en" in languages or "en-US" in languages: - english_datasets.append(dataset_name) - - return sorted(english_datasets) - - -def list_datasets(template_collection, _state): - """Get all the datasets to work with.""" - dataset_list = filter_english_datasets() - dataset_list.sort(key=lambda x: x.lower()) - return dataset_list diff --git a/spaces/mrungta8/CitationalAmnesia/app.py b/spaces/mrungta8/CitationalAmnesia/app.py deleted file mode 100644 index bc78e5e5e6601ce82493a5dfc798452913baf9a3..0000000000000000000000000000000000000000 --- a/spaces/mrungta8/CitationalAmnesia/app.py +++ /dev/null @@ -1,192 +0,0 @@ -import gradio as gr -import json -import os -import sys -import csv -import requests -import json -import pandas as pd -import concurrent.futures -from tqdm import tqdm -import shutil -import numpy as np -import seaborn as sns -from matplotlib import pyplot as plt -import pickle -from scipy.stats import percentileofscore - - -mean_citation_list = [] - -# Open the file and read the content in a list -with open('mean_citation_list.txt', 'r') as filehandle: - for line in filehandle: - temp = float(line[:-1]) - mean_citation_list.append(temp) - -# # Read list to memory -# def read_list(): -# # for reading also binary mode is important -# with open('mean_aoc_all_papers.pkl', 'rb') as fp: -# n_list = pickle.load(fp) -# return n_list - -# mean_citation_list = read_list() - -def generate_plot_maoc(input_maoc): - plt.clf() - sns.set(font_scale = 8) - sns.set(rc={'figure.figsize':(10,6)}) - sns.set_style(style='whitegrid') - - ax = sns.histplot(mean_citation_list, bins=100, kde=True, 
color='skyblue') - kdeline = ax.lines[0] - xs = kdeline.get_xdata() - ys = kdeline.get_ydata() - - interpolated_y_maoc = np.interp(input_maoc, kdeline.get_xdata(), kdeline.get_ydata()) - ax.scatter(input_maoc, interpolated_y_maoc,c='r', marker='*',linewidths=5, zorder=2) - ax.vlines(input_maoc, 0, interpolated_y_maoc, color='tomato', ls='--', lw=2) - epsilon = 0.3 - ax.text(input_maoc + epsilon, interpolated_y_maoc + epsilon, 'Your paper', {'color': '#DC143C', 'fontsize': 13}) - ax.set_xlabel("mean Age of Citation(mAoC)",fontsize=15) - ax.set_ylabel("Number of papers",fontsize=15) - ax.tick_params(axis='both', which='major', labelsize=12) - percentile_of_input_maoc = percentileofscore(mean_citation_list, input_maoc) - percentile_of_input_maoc = round(percentile_of_input_maoc, 2) - title = "The mAoC of your paper is at " + r"$\bf{" + str(percentile_of_input_maoc) + "}$" + "-th percentile of all the papers \n in our database (papers published until 2021 years)" - plt.title(title, fontsize=12) - return plt - -# sent a request -def request_to_respose(request_url): - request_response = requests.get(request_url, headers={'x-api-key': 'qZWKkOKyzP5g9fgjyMmBt1MN2NTC6aT61UklAiyw'}) - return request_response - -def return_clear(): - return None, None, None, None, None - - -def compute_output(ssid_paper_id): - output_num_ref = 0 - output_maoc = 0 - oldest_paper_list = "" - - request_url = f'https://api.semanticscholar.org/graph/v1/paper/{ssid_paper_id}?fields=references,title,venue,year' - r = request_to_respose(request_url) - if r.status_code == 200: # if successful request - s2_ref_paper_keys = [reference_paper_tuple['paperId'] for reference_paper_tuple in r.json()['references']] - filtered_s2_ref_paper_keys = [s2_ref_paper_key for s2_ref_paper_key in s2_ref_paper_keys if s2_ref_paper_key is not None] - total_references = len(s2_ref_paper_keys) - none_references = (len(s2_ref_paper_keys) - len(filtered_s2_ref_paper_keys)) - s2_ref_paper_keys = filtered_s2_ref_paper_keys - - # print(r.json()) - - s2_paper_key, title, venue, year = r.json()['paperId'], r.json()['title'], r.json()['venue'], r.json()['year'] - reference_year_list = [] - reference_title_list = [] - for ref_paper_key in s2_ref_paper_keys: - request_url_ref = f'https://api.semanticscholar.org/graph/v1/paper/{ref_paper_key}?fields=references,title,venue,year' - r_ref = request_to_respose(request_url_ref) - if r_ref.status_code == 200: - s2_paper_key_ref, title_ref, venue_ref, year_ref = r_ref.json()['paperId'], r_ref.json()['title'], r_ref.json()['venue'], r_ref.json()['year'] - reference_year_list.append(year_ref) - reference_title_list.append(title_ref) - - # print(f'Number of references for which we got the year = {len(reference_year_list)}') - output_num_ref = len(reference_year_list) - aoc_list = [year - year_ref for year_ref in reference_year_list] - output_maoc = sum(aoc_list)/len(aoc_list) - - sorted_ref_title_list = [x for _,x in sorted(zip(reference_year_list,reference_title_list))] - sorted_ref_year_list = [x for x,_ in sorted(zip(reference_year_list,reference_title_list))] - text = "" - sorted_ref_title_list = sorted_ref_title_list[:min(len(sorted_ref_title_list), 5)] - sorted_ref_year_list = sorted_ref_year_list[:min(len(sorted_ref_year_list), 5)] - for i in range(len(sorted_ref_year_list)): - text += '[' + str(sorted_ref_year_list[i]) + ']' + " Title: " + sorted_ref_title_list[i] + '\n' - - oldest_paper_list = text - plot_maoc = generate_plot_maoc(output_maoc) - # print(plot_maoc) - - return output_num_ref, output_maoc, 
oldest_paper_list, gr.update(value=plot_maoc) - -with gr.Blocks(theme=gr.themes.Soft()) as demo: - with gr.Row(): - gr.Markdown( - """ -

          Citational Amnesia

          -
          -
          Demo to predict the number of references, mean age of citation(mAoC), and comparison of mAoC with all the papers in the ACL Anthology.
          -
          Kindly enter the Semantic Scholar ID(SSID) of the paper in the box and click "Generate"
          -
          -

          Retrieving SSID

          For paper : https://www.semanticscholar.org/paper/Geographic-Citation-Gaps-in-NLP-Research-Rungta-Singh/6f8ab0fa15c87cee55b2ca5b8877e6784887ddbf
          -
          SSID is : 6f8ab0fa15c87cee55b2ca5b8877e6784887ddbf
          -
          Note: Currently we only support SSID as the input format
          - """ - ) - with gr.Row(): - ss_paper_id = gr.Textbox(label='Semantic Scholar ID',placeholder="Enter the Semantic Scholar ID here and press enter...", lines=1) - with gr.Row(): - submit_btn = gr.Button("Generate") - with gr.Row(): - num_ref = gr.Textbox(label="Number of references") - mAoc = gr.Textbox(label="Mean AoC") - with gr.Row(): - oldest_paper_list = gr.Textbox(label="Top 5 oldest papers cited:",lines=5) - with gr.Row(): - mAocPlot = gr.Plot(label="Plot") - with gr.Row(): - clear_btn = gr.Button("Clear") - - submit_btn.click(fn = compute_output, inputs = [ss_paper_id], outputs = [num_ref, mAoc, oldest_paper_list, mAocPlot]) - # clear_btn.click(lambda: None, None, None, queue=False) - clear_btn.click(fn = return_clear, inputs=[], outputs=[ss_paper_id, num_ref, mAoc, oldest_paper_list, mAocPlot]) - -demo.queue(concurrency_count=3) -demo.launch() - - -# with gr.Blocks() as demo: -# ss_paper_id = gr.Textbox(label='Semantic Scholar ID',placeholder="Enter the Semantic Scholar ID here and press enter...", lines=1) -# submit_btn = gr.Button("Generate") -# with gr.Row(): -# num_ref = gr.Textbox(label="Number of references") -# mAoc = gr.Textbox(label="Mean AoC") -# with gr.Row(): -# oldest_paper_list = gr.Textbox(label="Top 5 oldest papers cited:",lines=5) -# with gr.Row(): -# mAocPlot = gr.Plot(label="Plot") - -# clear_btn = gr.Button("Clear") - -# submit_btn.click(fn = compute_output, inputs = [ss_paper_id], outputs = [num_ref, mAoc, oldest_paper_list, mAocPlot]) -# # clear_btn.click(lambda: None, None, None, queue=False) -# clear_btn.click(fn = return_clear, inputs=[], outputs=[ss_paper_id, num_ref, mAoc, oldest_paper_list, mAocPlot]) - -# demo.launch() - -# import openai -# import gradio - -# openai.api_key = "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" - -# messages = [{"role": "system", "content": "Anhub Online Education Tutor for Any Subjects:"}] - -# def CustomChatGPT(user_input): -# messages.append({"role": "user", "content": user_input}) -# response = openai.ChatCompletion.create( -# model = "gpt-3.5-turbo", -# messages = messages -# ) -# ChatGPT_reply = response["choices"][0]["message"]["content"] -# messages.append({"role": "assistant", "content": ChatGPT_reply}) -# return ChatGPT_reply - -# demo = gradio.Interface(fn=CustomChatGPT, inputs = "text", outputs = "text", title = "Anhub Metaverse Education Online Tutor for Any Subjects and any Languages @ 24 x 7:") - - - -# demo.launch() \ No newline at end of file diff --git a/spaces/mygyasir/Real-Time-Voice-Cloning/samples/README.md b/spaces/mygyasir/Real-Time-Voice-Cloning/samples/README.md deleted file mode 100644 index 1a392d86e42f72e83954619f563f4881da327236..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/Real-Time-Voice-Cloning/samples/README.md +++ /dev/null @@ -1,22 +0,0 @@ -The audio files in this folder are provided for toolbox testing and -benchmarking purposes. These are the same reference utterances -used by the SV2TTS authors to generate the audio samples located at: -https://google.github.io/tacotron/publications/speaker_adaptation/index.html - -The `p240_00000.mp3` and `p260_00000.mp3` files are compressed -versions of audios from the VCTK corpus available at: -https://datashare.is.ed.ac.uk/handle/10283/3443 -VCTK.txt contains the copyright notices and licensing information. 
- -The `1320_00000.mp3`, `3575_00000.mp3`, `6829_00000.mp3` -and `8230_00000.mp3` files are compressed versions of audios -from the LibriSpeech dataset available at: https://openslr.org/12 -For these files, the following notice applies: -``` -LibriSpeech (c) 2014 by Vassil Panayotov - -LibriSpeech ASR corpus is licensed under a -Creative Commons Attribution 4.0 International License. - -See . -``` diff --git a/spaces/mygyasir/Real-Time-Voice-Cloning/toolbox/ui.py b/spaces/mygyasir/Real-Time-Voice-Cloning/toolbox/ui.py deleted file mode 100644 index d56b5740e276751f954aae1ca17e5ed485b48937..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/Real-Time-Voice-Cloning/toolbox/ui.py +++ /dev/null @@ -1,611 +0,0 @@ -import matplotlib.pyplot as plt -from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas -from matplotlib.figure import Figure -from PyQt5.QtCore import Qt, QStringListModel -from PyQt5.QtWidgets import * -from encoder.inference import plot_embedding_as_heatmap -from toolbox.utterance import Utterance -from pathlib import Path -from typing import List, Set -import sounddevice as sd -import soundfile as sf -import numpy as np -# from sklearn.manifold import TSNE # You can try with TSNE if you like, I prefer UMAP -from time import sleep -import umap -import sys -from warnings import filterwarnings, warn -filterwarnings("ignore") - - -colormap = np.array([ - [0, 127, 70], - [255, 0, 0], - [255, 217, 38], - [0, 135, 255], - [165, 0, 165], - [255, 167, 255], - [97, 142, 151], - [0, 255, 255], - [255, 96, 38], - [142, 76, 0], - [33, 0, 127], - [0, 0, 0], - [183, 183, 183], - [76, 255, 0], -], dtype=np.float) / 255 - -default_text = \ - "Welcome to the toolbox! To begin, load an utterance from your datasets or record one " \ - "yourself.\nOnce its embedding has been created, you can synthesize any text written here.\n" \ - "The synthesizer expects to generate " \ - "outputs that are somewhere between 5 and 12 seconds.\nTo mark breaks, write a new line. " \ - "Each line will be treated separately.\nThen, they are joined together to make the final " \ - "spectrogram. Use the vocoder to generate audio.\nThe vocoder generates almost in constant " \ - "time, so it will be more time efficient for longer inputs like this one.\nOn the left you " \ - "have the embedding projections. Load or record more utterances to see them.\nIf you have " \ - "at least 2 or 3 utterances from a same speaker, a cluster should form.\nSynthesized " \ - "utterances are of the same color as the speaker whose voice was used, but they're " \ - "represented with a cross." 
- - -class UI(QDialog): - min_umap_points = 4 - max_log_lines = 5 - max_saved_utterances = 20 - - def draw_utterance(self, utterance: Utterance, which): - self.draw_spec(utterance.spec, which) - self.draw_embed(utterance.embed, utterance.name, which) - - def draw_embed(self, embed, name, which): - embed_ax, _ = self.current_ax if which == "current" else self.gen_ax - embed_ax.figure.suptitle("" if embed is None else name) - - ## Embedding - # Clear the plot - if len(embed_ax.images) > 0: - embed_ax.images[0].colorbar.remove() - embed_ax.clear() - - # Draw the embed - if embed is not None: - plot_embedding_as_heatmap(embed, embed_ax) - embed_ax.set_title("embedding") - embed_ax.set_aspect("equal", "datalim") - embed_ax.set_xticks([]) - embed_ax.set_yticks([]) - embed_ax.figure.canvas.draw() - - def draw_spec(self, spec, which): - _, spec_ax = self.current_ax if which == "current" else self.gen_ax - - ## Spectrogram - # Draw the spectrogram - spec_ax.clear() - if spec is not None: - im = spec_ax.imshow(spec, aspect="auto", interpolation="none") - # spec_ax.figure.colorbar(mappable=im, shrink=0.65, orientation="horizontal", - # spec_ax=spec_ax) - spec_ax.set_title("mel spectrogram") - - spec_ax.set_xticks([]) - spec_ax.set_yticks([]) - spec_ax.figure.canvas.draw() - if which != "current": - self.vocode_button.setDisabled(spec is None) - - def draw_umap_projections(self, utterances: Set[Utterance]): - self.umap_ax.clear() - - speakers = np.unique([u.speaker_name for u in utterances]) - colors = {speaker_name: colormap[i] for i, speaker_name in enumerate(speakers)} - embeds = [u.embed for u in utterances] - - # Display a message if there aren't enough points - if len(utterances) < self.min_umap_points: - self.umap_ax.text(.5, .5, "Add %d more points to\ngenerate the projections" % - (self.min_umap_points - len(utterances)), - horizontalalignment='center', fontsize=15) - self.umap_ax.set_title("") - - # Compute the projections - else: - if not self.umap_hot: - self.log( - "Drawing UMAP projections for the first time, this will take a few seconds.") - self.umap_hot = True - - reducer = umap.UMAP(int(np.ceil(np.sqrt(len(embeds)))), metric="cosine") - # reducer = TSNE() - projections = reducer.fit_transform(embeds) - - speakers_done = set() - for projection, utterance in zip(projections, utterances): - color = colors[utterance.speaker_name] - mark = "x" if "_gen_" in utterance.name else "o" - label = None if utterance.speaker_name in speakers_done else utterance.speaker_name - speakers_done.add(utterance.speaker_name) - self.umap_ax.scatter(projection[0], projection[1], c=[color], marker=mark, - label=label) - # self.umap_ax.set_title("UMAP projections") - self.umap_ax.legend(prop={'size': 10}) - - # Draw the plot - self.umap_ax.set_aspect("equal", "datalim") - self.umap_ax.set_xticks([]) - self.umap_ax.set_yticks([]) - self.umap_ax.figure.canvas.draw() - - def save_audio_file(self, wav, sample_rate): - dialog = QFileDialog() - dialog.setDefaultSuffix(".wav") - fpath, _ = dialog.getSaveFileName( - parent=self, - caption="Select a path to save the audio file", - filter="Audio Files (*.flac *.wav)" - ) - if fpath: - #Default format is wav - if Path(fpath).suffix == "": - fpath += ".wav" - sf.write(fpath, wav, sample_rate) - - def setup_audio_devices(self, sample_rate): - input_devices = [] - output_devices = [] - for device in sd.query_devices(): - # Check if valid input - try: - sd.check_input_settings(device=device["name"], samplerate=sample_rate) - input_devices.append(device["name"]) - except: - 
pass - - # Check if valid output - try: - sd.check_output_settings(device=device["name"], samplerate=sample_rate) - output_devices.append(device["name"]) - except Exception as e: - # Log a warning only if the device is not an input - if not device["name"] in input_devices: - warn("Unsupported output device %s for the sample rate: %d \nError: %s" % (device["name"], sample_rate, str(e))) - - if len(input_devices) == 0: - self.log("No audio input device detected. Recording may not work.") - self.audio_in_device = None - else: - self.audio_in_device = input_devices[0] - - if len(output_devices) == 0: - self.log("No supported output audio devices were found! Audio output may not work.") - self.audio_out_devices_cb.addItems(["None"]) - self.audio_out_devices_cb.setDisabled(True) - else: - self.audio_out_devices_cb.clear() - self.audio_out_devices_cb.addItems(output_devices) - self.audio_out_devices_cb.currentTextChanged.connect(self.set_audio_device) - - self.set_audio_device() - - def set_audio_device(self): - - output_device = self.audio_out_devices_cb.currentText() - if output_device == "None": - output_device = None - - # If None, sounddevice queries portaudio - sd.default.device = (self.audio_in_device, output_device) - - def play(self, wav, sample_rate): - try: - sd.stop() - sd.play(wav, sample_rate) - except Exception as e: - print(e) - self.log("Error in audio playback. Try selecting a different audio output device.") - self.log("Your device must be connected before you start the toolbox.") - - def stop(self): - sd.stop() - - def record_one(self, sample_rate, duration): - self.record_button.setText("Recording...") - self.record_button.setDisabled(True) - - self.log("Recording %d seconds of audio" % duration) - sd.stop() - try: - wav = sd.rec(duration * sample_rate, sample_rate, 1) - except Exception as e: - print(e) - self.log("Could not record anything. Is your recording device enabled?") - self.log("Your device must be connected before you start the toolbox.") - return None - - for i in np.arange(0, duration, 0.1): - self.set_loading(i, duration) - sleep(0.1) - self.set_loading(duration, duration) - sd.wait() - - self.log("Done recording.") - self.record_button.setText("Record") - self.record_button.setDisabled(False) - - return wav.squeeze() - - @property - def current_dataset_name(self): - return self.dataset_box.currentText() - - @property - def current_speaker_name(self): - return self.speaker_box.currentText() - - @property - def current_utterance_name(self): - return self.utterance_box.currentText() - - def browse_file(self): - fpath = QFileDialog().getOpenFileName( - parent=self, - caption="Select an audio file", - filter="Audio Files (*.mp3 *.flac *.wav *.m4a)" - ) - return Path(fpath[0]) if fpath[0] != "" else "" - - @staticmethod - def repopulate_box(box, items, random=False): - """ - Resets a box and adds a list of items. 
Pass a list of (item, data) pairs instead to join - data to the items - """ - box.blockSignals(True) - box.clear() - for item in items: - item = list(item) if isinstance(item, tuple) else [item] - box.addItem(str(item[0]), *item[1:]) - if len(items) > 0: - box.setCurrentIndex(np.random.randint(len(items)) if random else 0) - box.setDisabled(len(items) == 0) - box.blockSignals(False) - - def populate_browser(self, datasets_root: Path, recognized_datasets: List, level: int, - random=True): - # Select a random dataset - if level <= 0: - if datasets_root is not None: - datasets = [datasets_root.joinpath(d) for d in recognized_datasets] - datasets = [d.relative_to(datasets_root) for d in datasets if d.exists()] - self.browser_load_button.setDisabled(len(datasets) == 0) - if datasets_root is None or len(datasets) == 0: - msg = "Warning: you d" + ("id not pass a root directory for datasets as argument" \ - if datasets_root is None else "o not have any of the recognized datasets" \ - " in %s" % datasets_root) - self.log(msg) - msg += ".\nThe recognized datasets are:\n\t%s\nFeel free to add your own. You " \ - "can still use the toolbox by recording samples yourself." % \ - ("\n\t".join(recognized_datasets)) - print(msg, file=sys.stderr) - - self.random_utterance_button.setDisabled(True) - self.random_speaker_button.setDisabled(True) - self.random_dataset_button.setDisabled(True) - self.utterance_box.setDisabled(True) - self.speaker_box.setDisabled(True) - self.dataset_box.setDisabled(True) - self.browser_load_button.setDisabled(True) - self.auto_next_checkbox.setDisabled(True) - return - self.repopulate_box(self.dataset_box, datasets, random) - - # Select a random speaker - if level <= 1: - speakers_root = datasets_root.joinpath(self.current_dataset_name) - speaker_names = [d.stem for d in speakers_root.glob("*") if d.is_dir()] - self.repopulate_box(self.speaker_box, speaker_names, random) - - # Select a random utterance - if level <= 2: - utterances_root = datasets_root.joinpath( - self.current_dataset_name, - self.current_speaker_name - ) - utterances = [] - for extension in ['mp3', 'flac', 'wav', 'm4a']: - utterances.extend(Path(utterances_root).glob("**/*.%s" % extension)) - utterances = [fpath.relative_to(utterances_root) for fpath in utterances] - self.repopulate_box(self.utterance_box, utterances, random) - - def browser_select_next(self): - index = (self.utterance_box.currentIndex() + 1) % len(self.utterance_box) - self.utterance_box.setCurrentIndex(index) - - @property - def current_encoder_fpath(self): - return self.encoder_box.itemData(self.encoder_box.currentIndex()) - - @property - def current_synthesizer_fpath(self): - return self.synthesizer_box.itemData(self.synthesizer_box.currentIndex()) - - @property - def current_vocoder_fpath(self): - return self.vocoder_box.itemData(self.vocoder_box.currentIndex()) - - def populate_models(self, encoder_models_dir: Path, synthesizer_models_dir: Path, - vocoder_models_dir: Path): - # Encoder - encoder_fpaths = list(encoder_models_dir.glob("*.pt")) - if len(encoder_fpaths) == 0: - raise Exception("No encoder models found in %s" % encoder_models_dir) - self.repopulate_box(self.encoder_box, [(f.stem, f) for f in encoder_fpaths]) - - # Synthesizer - synthesizer_fpaths = list(synthesizer_models_dir.glob("**/*.pt")) - if len(synthesizer_fpaths) == 0: - raise Exception("No synthesizer models found in %s" % synthesizer_models_dir) - self.repopulate_box(self.synthesizer_box, [(f.stem, f) for f in synthesizer_fpaths]) - - # Vocoder - vocoder_fpaths = 
list(vocoder_models_dir.glob("**/*.pt")) - vocoder_items = [(f.stem, f) for f in vocoder_fpaths] + [("Griffin-Lim", None)] - self.repopulate_box(self.vocoder_box, vocoder_items) - - @property - def selected_utterance(self): - return self.utterance_history.itemData(self.utterance_history.currentIndex()) - - def register_utterance(self, utterance: Utterance): - self.utterance_history.blockSignals(True) - self.utterance_history.insertItem(0, utterance.name, utterance) - self.utterance_history.setCurrentIndex(0) - self.utterance_history.blockSignals(False) - - if len(self.utterance_history) > self.max_saved_utterances: - self.utterance_history.removeItem(self.max_saved_utterances) - - self.play_button.setDisabled(False) - self.generate_button.setDisabled(False) - self.synthesize_button.setDisabled(False) - - def log(self, line, mode="newline"): - if mode == "newline": - self.logs.append(line) - if len(self.logs) > self.max_log_lines: - del self.logs[0] - elif mode == "append": - self.logs[-1] += line - elif mode == "overwrite": - self.logs[-1] = line - log_text = '\n'.join(self.logs) - - self.log_window.setText(log_text) - self.app.processEvents() - - def set_loading(self, value, maximum=1): - self.loading_bar.setValue(value * 100) - self.loading_bar.setMaximum(maximum * 100) - self.loading_bar.setTextVisible(value != 0) - self.app.processEvents() - - def populate_gen_options(self, seed, trim_silences): - if seed is not None: - self.random_seed_checkbox.setChecked(True) - self.seed_textbox.setText(str(seed)) - self.seed_textbox.setEnabled(True) - else: - self.random_seed_checkbox.setChecked(False) - self.seed_textbox.setText(str(0)) - self.seed_textbox.setEnabled(False) - - if not trim_silences: - self.trim_silences_checkbox.setChecked(False) - self.trim_silences_checkbox.setDisabled(True) - - def update_seed_textbox(self): - if self.random_seed_checkbox.isChecked(): - self.seed_textbox.setEnabled(True) - else: - self.seed_textbox.setEnabled(False) - - def reset_interface(self): - self.draw_embed(None, None, "current") - self.draw_embed(None, None, "generated") - self.draw_spec(None, "current") - self.draw_spec(None, "generated") - self.draw_umap_projections(set()) - self.set_loading(0) - self.play_button.setDisabled(True) - self.generate_button.setDisabled(True) - self.synthesize_button.setDisabled(True) - self.vocode_button.setDisabled(True) - self.replay_wav_button.setDisabled(True) - self.export_wav_button.setDisabled(True) - [self.log("") for _ in range(self.max_log_lines)] - - def __init__(self): - ## Initialize the application - self.app = QApplication(sys.argv) - super().__init__(None) - self.setWindowTitle("SV2TTS toolbox") - - - ## Main layouts - # Root - root_layout = QGridLayout() - self.setLayout(root_layout) - - # Browser - browser_layout = QGridLayout() - root_layout.addLayout(browser_layout, 0, 0, 1, 2) - - # Generation - gen_layout = QVBoxLayout() - root_layout.addLayout(gen_layout, 0, 2, 1, 2) - - # Projections - self.projections_layout = QVBoxLayout() - root_layout.addLayout(self.projections_layout, 1, 0, 1, 1) - - # Visualizations - vis_layout = QVBoxLayout() - root_layout.addLayout(vis_layout, 1, 1, 1, 3) - - - ## Projections - # UMap - fig, self.umap_ax = plt.subplots(figsize=(3, 3), facecolor="#F0F0F0") - fig.subplots_adjust(left=0.02, bottom=0.02, right=0.98, top=0.98) - self.projections_layout.addWidget(FigureCanvas(fig)) - self.umap_hot = False - self.clear_button = QPushButton("Clear") - self.projections_layout.addWidget(self.clear_button) - - - ## Browser - # 
Dataset, speaker and utterance selection - i = 0 - self.dataset_box = QComboBox() - browser_layout.addWidget(QLabel("Dataset"), i, 0) - browser_layout.addWidget(self.dataset_box, i + 1, 0) - self.speaker_box = QComboBox() - browser_layout.addWidget(QLabel("Speaker"), i, 1) - browser_layout.addWidget(self.speaker_box, i + 1, 1) - self.utterance_box = QComboBox() - browser_layout.addWidget(QLabel("Utterance"), i, 2) - browser_layout.addWidget(self.utterance_box, i + 1, 2) - self.browser_load_button = QPushButton("Load") - browser_layout.addWidget(self.browser_load_button, i + 1, 3) - i += 2 - - # Random buttons - self.random_dataset_button = QPushButton("Random") - browser_layout.addWidget(self.random_dataset_button, i, 0) - self.random_speaker_button = QPushButton("Random") - browser_layout.addWidget(self.random_speaker_button, i, 1) - self.random_utterance_button = QPushButton("Random") - browser_layout.addWidget(self.random_utterance_button, i, 2) - self.auto_next_checkbox = QCheckBox("Auto select next") - self.auto_next_checkbox.setChecked(True) - browser_layout.addWidget(self.auto_next_checkbox, i, 3) - i += 1 - - # Utterance box - browser_layout.addWidget(QLabel("Use embedding from:"), i, 0) - self.utterance_history = QComboBox() - browser_layout.addWidget(self.utterance_history, i, 1, 1, 3) - i += 1 - - # Random & next utterance buttons - self.browser_browse_button = QPushButton("Browse") - browser_layout.addWidget(self.browser_browse_button, i, 0) - self.record_button = QPushButton("Record") - browser_layout.addWidget(self.record_button, i, 1) - self.play_button = QPushButton("Play") - browser_layout.addWidget(self.play_button, i, 2) - self.stop_button = QPushButton("Stop") - browser_layout.addWidget(self.stop_button, i, 3) - i += 1 - - - # Model and audio output selection - self.encoder_box = QComboBox() - browser_layout.addWidget(QLabel("Encoder"), i, 0) - browser_layout.addWidget(self.encoder_box, i + 1, 0) - self.synthesizer_box = QComboBox() - browser_layout.addWidget(QLabel("Synthesizer"), i, 1) - browser_layout.addWidget(self.synthesizer_box, i + 1, 1) - self.vocoder_box = QComboBox() - browser_layout.addWidget(QLabel("Vocoder"), i, 2) - browser_layout.addWidget(self.vocoder_box, i + 1, 2) - - self.audio_out_devices_cb=QComboBox() - browser_layout.addWidget(QLabel("Audio Output"), i, 3) - browser_layout.addWidget(self.audio_out_devices_cb, i + 1, 3) - i += 2 - - #Replay & Save Audio - browser_layout.addWidget(QLabel("Toolbox Output:"), i, 0) - self.waves_cb = QComboBox() - self.waves_cb_model = QStringListModel() - self.waves_cb.setModel(self.waves_cb_model) - self.waves_cb.setToolTip("Select one of the last generated waves in this section for replaying or exporting") - browser_layout.addWidget(self.waves_cb, i, 1) - self.replay_wav_button = QPushButton("Replay") - self.replay_wav_button.setToolTip("Replay last generated vocoder") - browser_layout.addWidget(self.replay_wav_button, i, 2) - self.export_wav_button = QPushButton("Export") - self.export_wav_button.setToolTip("Save last generated vocoder audio in filesystem as a wav file") - browser_layout.addWidget(self.export_wav_button, i, 3) - i += 1 - - - ## Embed & spectrograms - vis_layout.addStretch() - - gridspec_kw = {"width_ratios": [1, 4]} - fig, self.current_ax = plt.subplots(1, 2, figsize=(10, 2.25), facecolor="#F0F0F0", - gridspec_kw=gridspec_kw) - fig.subplots_adjust(left=0, bottom=0.1, right=1, top=0.8) - vis_layout.addWidget(FigureCanvas(fig)) - - fig, self.gen_ax = plt.subplots(1, 2, figsize=(10, 2.25), 
facecolor="#F0F0F0", - gridspec_kw=gridspec_kw) - fig.subplots_adjust(left=0, bottom=0.1, right=1, top=0.8) - vis_layout.addWidget(FigureCanvas(fig)) - - for ax in self.current_ax.tolist() + self.gen_ax.tolist(): - ax.set_facecolor("#F0F0F0") - for side in ["top", "right", "bottom", "left"]: - ax.spines[side].set_visible(False) - - - ## Generation - self.text_prompt = QPlainTextEdit(default_text) - gen_layout.addWidget(self.text_prompt, stretch=1) - - self.generate_button = QPushButton("Synthesize and vocode") - gen_layout.addWidget(self.generate_button) - - layout = QHBoxLayout() - self.synthesize_button = QPushButton("Synthesize only") - layout.addWidget(self.synthesize_button) - self.vocode_button = QPushButton("Vocode only") - layout.addWidget(self.vocode_button) - gen_layout.addLayout(layout) - - layout_seed = QGridLayout() - self.random_seed_checkbox = QCheckBox("Random seed:") - self.random_seed_checkbox.setToolTip("When checked, makes the synthesizer and vocoder deterministic.") - layout_seed.addWidget(self.random_seed_checkbox, 0, 0) - self.seed_textbox = QLineEdit() - self.seed_textbox.setMaximumWidth(80) - layout_seed.addWidget(self.seed_textbox, 0, 1) - self.trim_silences_checkbox = QCheckBox("Enhance vocoder output") - self.trim_silences_checkbox.setToolTip("When checked, trims excess silence in vocoder output." - " This feature requires `webrtcvad` to be installed.") - layout_seed.addWidget(self.trim_silences_checkbox, 0, 2, 1, 2) - gen_layout.addLayout(layout_seed) - - self.loading_bar = QProgressBar() - gen_layout.addWidget(self.loading_bar) - - self.log_window = QLabel() - self.log_window.setAlignment(Qt.AlignBottom | Qt.AlignLeft) - gen_layout.addWidget(self.log_window) - self.logs = [] - gen_layout.addStretch() - - - ## Set the size of the window and of the elements - max_size = QDesktopWidget().availableGeometry(self).size() * 0.8 - self.resize(max_size) - - ## Finalize the display - self.reset_interface() - self.show() - - def start(self): - self.app.exec_() diff --git a/spaces/nakas/Time-Domain-Audio-Style-Transfer/audio_style_transfer/utils.py b/spaces/nakas/Time-Domain-Audio-Style-Transfer/audio_style_transfer/utils.py deleted file mode 100644 index f4d25fb1e17fafb435e3ebcbcaced13de55e1bea..0000000000000000000000000000000000000000 --- a/spaces/nakas/Time-Domain-Audio-Style-Transfer/audio_style_transfer/utils.py +++ /dev/null @@ -1,199 +0,0 @@ -"""NIPS2017 "Time Domain Neural Audio Style Transfer" code repository -Parag K. 
Mital -""" -import glob -import numpy as np -from scipy.signal import hann -import librosa -import matplotlib -import matplotlib.pyplot as plt -import os - - -def limiter(signal, - delay=40, - threshold=0.9, - release_coeff=0.9995, - attack_coeff=0.9): - - delay_index = 0 - envelope = 0 - gain = 1 - delay = delay - delay_line = np.zeros(delay) - release_coeff = release_coeff - attack_coeff = attack_coeff - threshold = threshold - - for idx, sample in enumerate(signal): - delay_line[delay_index] = sample - delay_index = (delay_index + 1) % delay - - # calculate an envelope of the signal - envelope = max(np.abs(sample), envelope * release_coeff) - - if envelope > threshold: - target_gain = threshold / envelope - else: - target_gain = 1.0 - - # have gain go towards a desired limiter gain - gain = (gain * attack_coeff + target_gain * (1 - attack_coeff)) - - # limit the delayed signal - signal[idx] = delay_line[delay_index] * gain - return signal - - -def chop(signal, hop_size=256, frame_size=512): - n_hops = len(signal) // hop_size - frames = [] - hann_win = hann(frame_size) - for hop_i in range(n_hops): - frame = signal[(hop_i * hop_size):(hop_i * hop_size + frame_size)] - frame = np.pad(frame, (0, frame_size - len(frame)), 'constant') - frame *= hann_win - frames.append(frame) - frames = np.array(frames) - return frames - - -def unchop(frames, hop_size=256, frame_size=512): - signal = np.zeros((frames.shape[0] * hop_size + frame_size,)) - for hop_i, frame in enumerate(frames): - signal[(hop_i * hop_size):(hop_i * hop_size + frame_size)] += frame - return signal - - -def matrix_dft(V): - N = len(V) - w = np.exp(-2j * np.pi / N) - col = np.vander([w], N, True) - W = np.vander(col.flatten(), N, True) / np.sqrt(N) - return np.dot(W, V) - - -def dft_np(signal, hop_size=256, fft_size=512): - s = chop(signal, hop_size, fft_size) - N = s.shape[-1] - k = np.reshape( - np.linspace(0.0, 2 * np.pi / N * (N // 2), N // 2), [1, N // 2]) - x = np.reshape(np.linspace(0.0, N - 1, N), [N, 1]) - freqs = np.dot(x, k) - real = np.dot(s, np.cos(freqs)) * (2.0 / N) - imag = np.dot(s, np.sin(freqs)) * (2.0 / N) - return real, imag - - -def idft_np(re, im, hop_size=256, fft_size=512): - N = re.shape[1] * 2 - k = np.reshape( - np.linspace(0.0, 2 * np.pi / N * (N // 2), N // 2), [N // 2, 1]) - x = np.reshape(np.linspace(0.0, N - 1, N), [1, N]) - freqs = np.dot(k, x) - signal = np.zeros((re.shape[0] * hop_size + fft_size,)) - recon = np.dot(re, np.cos(freqs)) + np.dot(im, np.sin(freqs)) - for hop_i, frame in enumerate(recon): - signal[(hop_i * hop_size):(hop_i * hop_size + fft_size)] += frame - return signal - - -def rainbowgram(path, - ax, - peak=70.0, - use_cqt=False, - n_fft=1024, - hop_length=256, - sr=22050, - over_sample=4, - res_factor=0.8, - octaves=5, - notes_per_octave=10): - audio = librosa.load(path, sr=sr)[0] - if use_cqt: - C = librosa.cqt(audio, - sr=sr, - hop_length=hop_length, - bins_per_octave=int(notes_per_octave * over_sample), - n_bins=int(octaves * notes_per_octave * over_sample), - filter_scale=res_factor, - fmin=librosa.note_to_hz('C2')) - else: - C = librosa.stft( - audio, - n_fft=n_fft, - win_length=n_fft, - hop_length=hop_length, - center=True) - mag, phase = librosa.core.magphase(C) - phase_angle = np.angle(phase) - phase_unwrapped = np.unwrap(phase_angle) - dphase = phase_unwrapped[:, 1:] - phase_unwrapped[:, :-1] - dphase = np.concatenate([phase_unwrapped[:, 0:1], dphase], axis=1) / np.pi - mag = (librosa.logamplitude( - mag**2, amin=1e-13, top_db=peak, ref_power=np.max) / peak) + 1 - 
cdict = { - 'red': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)), - 'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)), - 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)), - 'alpha': ((0.0, 1.0, 1.0), (1.0, 0.0, 0.0)) - } - my_mask = matplotlib.colors.LinearSegmentedColormap('MyMask', cdict) - plt.register_cmap(cmap=my_mask) - ax.matshow(dphase[::-1, :], cmap=plt.cm.rainbow) - ax.matshow(mag[::-1, :], cmap=my_mask) - - -def rainbowgrams(list_of_paths, - saveto=None, - rows=2, - cols=4, - col_labels=[], - row_labels=[], - use_cqt=True, - figsize=(15, 20), - peak=70.0): - """Build a CQT rowsXcols. - """ - N = len(list_of_paths) - assert N == rows * cols - fig, axes = plt.subplots( - rows, cols, sharex=True, sharey=True, figsize=figsize) - fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05, hspace=0.1) - # fig = plt.figure(figsize=(18, N * 1.25)) - for i, path in enumerate(list_of_paths): - row = int(i / cols) - col = i % cols - if rows == 1 and cols == 1: - ax = axes - elif rows == 1: - ax = axes[col] - elif cols == 1: - ax = axes[row] - else: - ax = axes[row, col] - rainbowgram(path, ax, peak, use_cqt) - ax.set_axis_bgcolor('white') - ax.set_xticks([]) - ax.set_yticks([]) - if col == 0 and row_labels: - ax.set_ylabel(row_labels[row]) - if row == rows - 1 and col_labels: - ax.set_xlabel(col_labels[col]) - if saveto is not None: - fig.savefig(filename='{}.png'.format(saveto)) - - -def plot_rainbowgrams(): - for root in ['target', 'corpus', 'results']: - files = glob.glob('{}/**/*.wav'.format(root), recursive=True) - for f in files: - fname = '{}.png'.format(f) - if not os.path.exists(fname): - rainbowgrams( - [f], - saveto=fname, - figsize=(20, 5), - rows=1, - cols=1) - plt.close('all') diff --git a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/apps/eval.py b/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/apps/eval.py deleted file mode 100644 index a0ee3fa66c75a144da5c155b927f63170b7e923c..0000000000000000000000000000000000000000 --- a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/apps/eval.py +++ /dev/null @@ -1,153 +0,0 @@ -import tqdm -import glob -import torchvision.transforms as transforms -from PIL import Image -from lib.model import * -from lib.train_util import * -from lib.sample_util import * -from lib.mesh_util import * -# from lib.options import BaseOptions -from torch.utils.data import DataLoader -import torch -import numpy as np -import json -import time -import sys -import os - -sys.path.insert(0, os.path.abspath( - os.path.join(os.path.dirname(__file__), '..'))) -ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - - -# # get options -# opt = BaseOptions().parse() - -class Evaluator: - def __init__(self, opt, projection_mode='orthogonal'): - self.opt = opt - self.load_size = self.opt.loadSize - self.to_tensor = transforms.Compose([ - transforms.Resize(self.load_size), - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) - ]) - # set cuda - cuda = torch.device( - 'cuda:%d' % opt.gpu_id) if torch.cuda.is_available() else torch.device('cpu') - - # create net - netG = HGPIFuNet(opt, projection_mode).to(device=cuda) - print('Using Network: ', netG.name) - - if opt.load_netG_checkpoint_path: - netG.load_state_dict(torch.load( - opt.load_netG_checkpoint_path, map_location=cuda)) - - if opt.load_netC_checkpoint_path is not None: - print('loading for net C ...', opt.load_netC_checkpoint_path) - netC = ResBlkPIFuNet(opt).to(device=cuda) - netC.load_state_dict(torch.load( - opt.load_netC_checkpoint_path, map_location=cuda)) - else: - netC = None - - 
os.makedirs(opt.results_path, exist_ok=True) - os.makedirs('%s/%s' % (opt.results_path, opt.name), exist_ok=True) - - opt_log = os.path.join(opt.results_path, opt.name, 'opt.txt') - with open(opt_log, 'w') as outfile: - outfile.write(json.dumps(vars(opt), indent=2)) - - self.cuda = cuda - self.netG = netG - self.netC = netC - - def load_image(self, image_path, mask_path): - # Name - img_name = os.path.splitext(os.path.basename(image_path))[0] - # Calib - B_MIN = np.array([-1, -1, -1]) - B_MAX = np.array([1, 1, 1]) - projection_matrix = np.identity(4) - projection_matrix[1, 1] = -1 - calib = torch.Tensor(projection_matrix).float() - # Mask - mask = Image.open(mask_path).convert('L') - mask = transforms.Resize(self.load_size)(mask) - mask = transforms.ToTensor()(mask).float() - # image - image = Image.open(image_path).convert('RGB') - image = self.to_tensor(image) - image = mask.expand_as(image) * image - return { - 'name': img_name, - 'img': image.unsqueeze(0), - 'calib': calib.unsqueeze(0), - 'mask': mask.unsqueeze(0), - 'b_min': B_MIN, - 'b_max': B_MAX, - } - - def load_image_from_memory(self, image_path, mask_path, img_name): - # Calib - B_MIN = np.array([-1, -1, -1]) - B_MAX = np.array([1, 1, 1]) - projection_matrix = np.identity(4) - projection_matrix[1, 1] = -1 - calib = torch.Tensor(projection_matrix).float() - # Mask - mask = Image.fromarray(mask_path).convert('L') - mask = transforms.Resize(self.load_size)(mask) - mask = transforms.ToTensor()(mask).float() - # image - image = Image.fromarray(image_path).convert('RGB') - image = self.to_tensor(image) - image = mask.expand_as(image) * image - return { - 'name': img_name, - 'img': image.unsqueeze(0), - 'calib': calib.unsqueeze(0), - 'mask': mask.unsqueeze(0), - 'b_min': B_MIN, - 'b_max': B_MAX, - } - - def eval(self, data, use_octree=False): - ''' - Evaluate a data point - :param data: a dict containing at least ['name'], ['image'], ['calib'], ['b_min'] and ['b_max'] tensors. - :return: - ''' - opt = self.opt - with torch.no_grad(): - self.netG.eval() - if self.netC: - self.netC.eval() - save_path = '%s/%s/result_%s.obj' % ( - opt.results_path, opt.name, data['name']) - if self.netC: - gen_mesh_color(opt, self.netG, self.netC, self.cuda, - data, save_path, use_octree=use_octree) - else: - gen_mesh(opt, self.netG, self.cuda, data, - save_path, use_octree=use_octree) - - -if __name__ == '__main__': - evaluator = Evaluator(opt) - - test_images = glob.glob(os.path.join(opt.test_folder_path, '*')) - test_images = [f for f in test_images if ( - 'png' in f or 'jpg' in f) and (not 'mask' in f)] - test_masks = [f[:-4]+'_mask.png' for f in test_images] - - print("num; ", len(test_masks)) - - for image_path, mask_path in tqdm.tqdm(zip(test_images, test_masks)): - try: - print(image_path, mask_path) - data = evaluator.load_image(image_path, mask_path) - evaluator.eval(data, True) - except Exception as e: - print("error:", e.args) diff --git a/spaces/nathanTQ/ChatDev/camel/typing.py b/spaces/nathanTQ/ChatDev/camel/typing.py deleted file mode 100644 index 4a63153de6cb752568512a6744172304fe65009a..0000000000000000000000000000000000000000 --- a/spaces/nathanTQ/ChatDev/camel/typing.py +++ /dev/null @@ -1,82 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -from enum import Enum - - -class TaskType(Enum): - AI_SOCIETY = "ai_society" - CODE = "code" - MISALIGNMENT = "misalignment" - TRANSLATION = "translation" - EVALUATION = "evaluation" - SOLUTION_EXTRACTION = "solution_extraction" - CHATDEV = "chat_dev" - DEFAULT = "default" - - -class RoleType(Enum): - ASSISTANT = "assistant" - USER = "user" - CRITIC = "critic" - EMBODIMENT = "embodiment" - DEFAULT = "default" - CHATDEV = "AgentTech" - CHATDEV_COUNSELOR = "counselor" - CHATDEV_CEO = "chief executive officer (CEO)" - CHATDEV_CHRO = "chief human resource officer (CHRO)" - CHATDEV_CPO = "chief product officer (CPO)" - CHATDEV_CTO = "chief technology officer (CTO)" - CHATDEV_PROGRAMMER = "programmer" - CHATDEV_REVIEWER = "code reviewer" - CHATDEV_TESTER = "software test engineer" - CHATDEV_CCO = "chief creative officer (CCO)" - - -class ModelType(Enum): - GPT_3_5_TURBO = "gpt-3.5-turbo-16k-0613" - GPT_4 = "gpt-4" - GPT_4_32k = "gpt-4-32k" - STUB = "stub" - - @property - def value_for_tiktoken(self): - return self.value if self.name != "STUB" else "gpt-3.5-turbo-16k-0613" - - -class PhaseType(Enum): - REFLECTION = "reflection" - RECRUITING_CHRO = "recruiting CHRO" - RECRUITING_CPO = "recruiting CPO" - RECRUITING_CTO = "recruiting CTO" - DEMAND_ANALYSIS = "demand analysis" - BRAINSTORMING = "brainstorming" - CHOOSING_LANGUAGE = "choosing language" - RECRUITING_PROGRAMMER = "recruiting programmer" - RECRUITING_REVIEWER = "recruiting reviewer" - RECRUITING_TESTER = "recruiting software test engineer" - RECRUITING_CCO = "recruiting chief creative officer" - CODING = "coding" - CODING_COMPLETION = "coding completion" - CODING_AUTOMODE = "coding auto mode" - REVIEWING_COMMENT = "review comment" - REVIEWING_MODIFICATION = "code modification after reviewing" - ERROR_SUMMARY = "error summary" - MODIFICATION = "code modification" - ART_ELEMENT_ABSTRACTION = "art element abstraction" - ART_ELEMENT_INTEGRATION = "art element integration" - CREATING_ENVIRONMENT_DOCUMENT = "environment document" - CREATING_USER_MANUAL = "user manual" - - -__all__ = ["TaskType", "RoleType", "ModelType", "PhaseType"] diff --git a/spaces/nbroad/openai-detector-base/app.py b/spaces/nbroad/openai-detector-base/app.py deleted file mode 100644 index a6fac4cfc145be7984ab513cce589808daf6a8bb..0000000000000000000000000000000000000000 --- a/spaces/nbroad/openai-detector-base/app.py +++ /dev/null @@ -1,98 +0,0 @@ -import os -import requests - -import gradio as gr -import torch -from transformers import ( - RobertaForSequenceClassification, - RobertaTokenizer, - RobertaConfig, -) - -HF_TOKEN = os.environ["HF_TOKEN"] - -os.system( - "wget https://openaipublic.azureedge.net/gpt-2/detector-models/v1/detector-base.pt" -) - -config = RobertaConfig.from_pretrained("roberta-base") -model = RobertaForSequenceClassification(config) -model.load_state_dict(torch.load("detector-base.pt")["model_state_dict"]) - -tokenizer = RobertaTokenizer.from_pretrained("roberta-base") - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -model.to(device) - 
- -def call_inference_api(query): - url = "https://api-inference.huggingface.co/models/roberta-base-openai-detector" - - headers = {"Authorization": f"Bearer {HF_TOKEN}"} - response = requests.post(url, json={"inputs": query}, headers=headers) - - code = response.status_code - if code == 200: - fake, real = response.json()[0] - - fake_score = fake["score"] - real_score = real["score"] - - return f"Fake: {fake_score:.2%} | Real: {real_score:.2%}" - - else: - error = response.json()["error"] - warning = response.json()["warnings"] - return f"Error: {error} | Warning: {warning}" - - -def local_call(query): - # Copied from https://github.com/openai/gpt-2-output-dataset/tree/master/detector#L35-L46 - tokens = tokenizer.encode(query) - all_tokens = len(tokens) - tokens = tokens[: tokenizer.max_len - 2] - used_tokens = len(tokens) - tokens = torch.tensor( - [tokenizer.bos_token_id] + tokens + [tokenizer.eos_token_id] - ).unsqueeze(0) - mask = torch.ones_like(tokens) - - with torch.no_grad(): - logits = model(tokens.to(device), attention_mask=mask.to(device))[0] - probs = logits.softmax(dim=-1) - - fake, real = probs.detach().cpu().flatten().numpy().tolist() - - return f"Fake: {fake:.2%} | Real: {real:.2%} | Used tokens: {used_tokens} | All tokens: {all_tokens}" - - -def main_function(query): - hosted_output = call_inference_api(query) - local_output = local_call(query) - - return hosted_output, local_output - - -text_input = gr.Textbox( - lines=5, - label="Enter text to compare output with the model hosted here: https://huggingface.co/roberta-base-openai-detector", -) -hosted_output = gr.Textbox(label="Output from model hosted on Hugging Face") -local_output = gr.Textbox( - label="Output from model running locally on transformers 2.0.0, tokenizers 0.7.0, and torch 1.4.0" -) - -description = "The original repository for the model used an older version of \ -transformers, tokenziers, and torch which results in slightly different results \ - compared to the model hosted on Hugging Face. This app compares the two models." - -demo = gr.Interface( - fn=main_function, - inputs="text", - outputs=[hosted_output, local_output], - title="Compare OpenAI detector models", - description=description, -) - -demo.launch() diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Aimersoft Drm Media Converter Full Crack Software.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Aimersoft Drm Media Converter Full Crack Software.md deleted file mode 100644 index 040eb99b1e06c1f20861fb234a6b1042becc38ab..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Aimersoft Drm Media Converter Full Crack Software.md +++ /dev/null @@ -1,24 +0,0 @@ -
          -

          How to Crack Aimersoft DRM Media Converter and Enjoy Its Full Features

          -

Aimersoft DRM Media Converter is a powerful piece of software that can remove DRM protection from video and audio files and convert them to various formats. It supports a wide range of input and output formats, such as MP4, WMV, AVI, MOV, MKV, MP3, WMA, AAC, and more. It can also rip DVDs to any video or audio format and burn videos to DVD with customized menus. With Aimersoft DRM Media Converter, you can enjoy your media files on any device or player without restrictions.

          -

However, Aimersoft DRM Media Converter is not free software. You need to pay $39.95 to get the full version with lifetime updates and technical support. If you don't want to spend money on this software, you may be tempted to look for a cracked version online. But is it safe and legal to do so? What are the risks and consequences of using a cracked Aimersoft DRM Media Converter?

          -

          aimersoft drm media converter full crack software


          Download File >>> https://urlcod.com/2uIb1l



          -

          The Dangers of Using Aimersoft DRM Media Converter Full Crack Software

          -

          Using a cracked Aimersoft DRM Media Converter may seem like a good idea at first, but it actually comes with many drawbacks and risks. Here are some of the dangers of using a cracked Aimersoft DRM Media Converter:

          -
            -
          • It may contain viruses or malware. Many websites that offer cracked software are not trustworthy. They may infect your computer with viruses, malware, spyware, ransomware, or other harmful programs that can damage your system, steal your personal information, or lock your files until you pay a ransom.
          • -
          • It may not work properly. A cracked Aimersoft DRM Media Converter may not function as well as the original one. It may have bugs, errors, crashes, or compatibility issues that can affect your user experience and the quality of your converted files. It may also lack some features or updates that are available in the official version.
          • -
          • It may violate the law. A cracked Aimersoft DRM Media Converter is an illegal copy of the software that infringes the copyright of Aimersoft Studio. By downloading and using a cracked Aimersoft DRM Media Converter, you are breaking the law and exposing yourself to potential legal actions from the software developer or other parties.
          • -
          • It may be unethical. A cracked Aimersoft DRM Media Converter is a form of piracy that deprives the software developer of their rightful income and recognition. By using a cracked Aimersoft DRM Media Converter, you are not supporting the hard work and innovation of Aimersoft Studio and other software developers who create useful and high-quality products for users.
          • -
          -

          The Best Way to Use Aimersoft DRM Media Converter

          -

          The best way to use Aimersoft DRM Media Converter is to purchase the official version from the official website[^1^]. By doing so, you can enjoy the following benefits:

          -
            -
• You can get clean and safe software. The official version of Aimersoft DRM Media Converter is free of viruses, malware, and other harmful programs. You can download and install it without worry.
          • -
• You can get fully functional software. The official version of Aimersoft DRM Media Converter has all the features and updates you need to remove DRM protection and convert media files with ease and quality. You can also get lifetime updates and technical support from Aimersoft Studio.
          • -
• You can get legal and ethical software. The official version of Aimersoft DRM Media Converter is a legitimate copy of the software that respects the copyright of Aimersoft Studio. By purchasing and using the official version of Aimersoft DRM Media Converter, you are complying with the law and showing your appreciation for the software developer.
          • -
          -

          To purchase the official version of Aimersoft DRM Media Converter, you can visit the official website[^1^] and click on the "Buy Now" button. You can choose between a 1-year license for $29.95 or a lifetime license for $39.95. You can also get a free trial version that allows you to convert 1 minute of each file for evaluation purposes.

          - 7196e7f11a
          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Clash Of Clans For Pc Free VERIFIED Download Windows 7 32-bit Version.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Clash Of Clans For Pc Free VERIFIED Download Windows 7 32-bit Version.md deleted file mode 100644 index 17cb90a0e545196d2d8d39efac8944682818c947..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Clash Of Clans For Pc Free VERIFIED Download Windows 7 32-bit Version.md +++ /dev/null @@ -1,44 +0,0 @@ - -

          How to Play Clash of Clans on Windows 7 32-bit Version

          -

          Clash of Clans is a popular strategy game that lets you build your own village, train your army, and fight against other players. The game is available for Android and iOS devices, but what if you want to play it on your Windows 7 PC?

          -

          clash of clans for pc free download windows 7 32-bit version


          Download File >>> https://urlcod.com/2uI9vC



          -

Unfortunately, there is no official version of Clash of Clans for Windows, but you can still enjoy the game on your computer using an Android emulator. An emulator is software that mimics the Android operating system on your PC, allowing you to run Android apps and games.

          -

          There are many Android emulators out there, but not all of them are compatible with Windows 7 32-bit version. In this article, we will show you how to use Tencent Gaming Buddy, a free and lightweight emulator that works well with Clash of Clans.

          -

          Steps to Download and Install Tencent Gaming Buddy

          -
            -
          1. Go to the official website of Tencent Gaming Buddy and click on the Download button.
          2. -
          3. Run the installer and follow the instructions to install the emulator on your PC.
          4. -
          5. Launch the emulator and log in with your Google account. If you don't have one, you can create one for free.
          6. -
          7. On the home screen, click on the Game Center tab and search for Clash of Clans.
          8. -
          9. Click on the Install button and wait for the game to download and install.
          10. -
          11. Once the game is installed, click on the Play button to launch it.
          12. -
          -

          How to Play Clash of Clans on Windows 7 32-bit Version

          -

          Playing Clash of Clans on Windows 7 32-bit version is similar to playing it on your mobile device. You can use your mouse to click and drag on the screen, or use the keyboard shortcuts to perform various actions. Here are some of the keyboard shortcuts you can use:

          -
            -
          • WASD keys: Move the camera
          • -
          • Spacebar: Select all troops
          • -
          • F1-F8 keys: Select specific troops
          • -
          • G key: Deploy troops
          • -
          • H key: Return home
          • -
          • V key: Visit a friend's village
          • -
          • B key: Open shop
          • -
          • N key: Open clan chat
          • -
          • M key: Open global chat
          • -
          • C key: Open settings
          • -
          -

          You can also customize the keyboard controls by clicking on the Keyboard icon on the right side of the emulator window.

          -

          -

          Tips and Tricks for Playing Clash of Clans on Windows 7 32-bit Version

          -

          Here are some tips and tricks to help you get the most out of playing Clash of Clans on Windows 7 32-bit version:

          -
            -
          • Make sure you have a stable internet connection and enough disk space to run the emulator and the game smoothly.
          • -
          • Update the emulator and the game regularly to get the latest features and bug fixes.
          • -
          • Connect your game account to Facebook or Supercell ID to save your progress and sync it across devices.
          • -
          • Join a clan or create your own to chat with other players, request and donate troops, and participate in clan wars.
          • -
          • Watch replays of your attacks and defenses to learn from your mistakes and improve your strategy.
          • -
          • Use gems wisely to speed up building, training, or research times. You can earn gems by completing achievements, clearing obstacles, or opening chests.
          • -
          • Have fun and enjoy the game!
          • -

          e93f5a0c3f
          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Motagua Multipurpose PowerPoint Template Rar.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Motagua Multipurpose PowerPoint Template Rar.md deleted file mode 100644 index d143cef8ca9e732df948f1f51473ccd8201efd79..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Motagua Multipurpose PowerPoint Template Rar.md +++ /dev/null @@ -1,55 +0,0 @@ -
          - - - - - -
          -

Motagua Multipurpose PowerPoint Template rar
Education slide example 3

          Education slide example 4
          -

          Creative

          -

Motagua PowerPoint Template can be used for creative presentations, such as portfolios, projects, showcases, etc. It has slides that can help you display your work, skills, achievements, awards, etc. It also has slides that can help you demonstrate your creativity, such as mood boards, sketches, wireframes, prototypes, etc. You can use the mockups and images to make your slides more attractive and realistic. Here are some examples of slides and layouts that are suitable for creative presentations:

          -

          Motagua Multipurpose PowerPoint Template rar


          DOWNLOAD · https://urlcod.com/2uIaZZ



          - - - - - - - - - -
Creative slide example 1 | Creative slide example 2
Creative slide example 3 | Creative slide example 4
          -

          What are the features and benefits of Motagua PowerPoint Template?

          -

          Clean, modern and creative design

          -

          Motagua PowerPoint Template has a clean, modern and creative design that can impress any audience. It has a minimalist and elegant style that can suit any theme or brand. It also has a variety of design elements and icons that can enhance your slides and make them more appealing. You can choose from over 3000 vector icons that are fully editable and scalable. You can also use the maps of different countries and regions that are included in the template.

          -

          Animated and non-animated versions

          -

          Motagua PowerPoint Template has both animated and non-animated versions that can suit different preferences and needs. You can choose the animated version if you want to add some motion and excitement to your slides. You can use the animations and transitions that are included in the template, such as fade, zoom, slide, flip, etc. You can also customize the speed and duration of the animations according to your liking. You can choose the non-animated version if you want to keep your slides simple and static. You can still use the same slides and layouts without any animations or transitions.

          -

          Data charts and infographics

          -

          Motagua PowerPoint Template has data charts and infographics that can help you present data and information in a clear and engaging way. You can use the data charts to show trends, comparisons, percentages, etc. You can use the infographics to show processes, steps, cycles, etc. You can easily edit the data charts and infographics using Excel or PowerPoint tools. You can also change the colors and styles of the data charts and infographics to match your theme or brand.

          -

          Customizable colors and fonts

          -

          Motagua PowerPoint Template has customizable colors and fonts that can help you match any theme or brand. You can choose from 15 color schemes that are already provided in the template, or you can create your own color scheme using the color palette tool. You can also choose from over 40 fonts that are already provided in the template, or you can use your own fonts if you prefer. You can easily change the colors and fonts of any slide or element using the format options in PowerPoint.

          -

          -

          Master slide layout and easy editing

          -

          Motagua PowerPoint Template has a master slide layout that can help you create consistent and professional slides. You can use the master slide to set the background, header, footer, logo, etc. of all your slides at once. You can also use the master slide to apply any changes or updates to all your slides at once. You can easily edit the template using drag-and-drop image replacement and Excel data editing. You can also use the placeholders and guides to align your content and elements.

          -

          How to download Motagua PowerPoint Template rar?

          -

If you want to download Motagua PowerPoint Template rar, you have several options online. You can download it from GraphicRiver for $15, which is a reasonable price for such a high-quality template. You can also download it from other websites that offer free or paid downloads of the template, such as SlideSalad, SlideModel, SlideHunter, etc. However, you should be careful about the quality and security of these downloads, as they may not be as reliable or trustworthy as GraphicRiver. You should always check the reviews and ratings of the websites before downloading anything from them. To download Motagua PowerPoint Template rar, follow these steps:
• Choose the website that you want to download from and click on the download link or button.
• You may need to create an account or sign in to the website before downloading. You may also need to pay a fee or complete a survey if the website requires it.
• You will get a rar file that contains the template files and other resources. Save the rar file to your computer or device.
• You will need software that can unzip or extract the rar file, such as WinRAR or 7-Zip. Install the software if you don't have it already.
• Right-click on the rar file and choose the option to unzip or extract it. You will get a folder that contains the template files and other resources.
• Open the folder and look for the pptx or ppt files, which are the template files. Choose the file that matches your aspect ratio, color scheme, and animation preference.
• Double-click on the pptx or ppt file to open it in PowerPoint. You can now edit and customize the template as you wish.

          Conclusion

          -

Motagua PowerPoint Template rar is a great choice for anyone who wants to create stunning presentations for any purpose. It is a multipurpose template that can be used for business, marketing, education, or creative presentations. It has a clean, modern and creative design, with animated and non-animated versions, data charts and infographics, customizable colors and fonts, and easy editing features. It is compatible with PowerPoint 2007 or higher versions on Windows and Mac devices. You can download it from GraphicRiver for $15, or from other websites that offer free or paid downloads of the template. However, you should be careful about the quality and security of these downloads, as they may not be as reliable or trustworthy as GraphicRiver. You should always check the reviews and ratings of the websites before downloading anything from them. If you want to impress any audience with your slides, try Motagua PowerPoint Template rar for your next presentation: it can help you present your content in a clear and engaging way, showcase your work, skills, and achievements, demonstrate your creativity, strategy, and data, and achieve your presentation goals. So what are you waiting for? Download Motagua PowerPoint Template rar today and start creating amazing presentations!

          -

          FAQs

          -

          Here are some frequently asked questions about Motagua PowerPoint Template rar with brief answers:

          -
            -
          • Q: How many slides are included in Motagua PowerPoint Template?
          • -
          • A: Motagua PowerPoint Template includes over 600 unique slides that can be used for various presentation topics and purposes.
          • -
          • Q: How can I change the color scheme of Motagua PowerPoint Template?
          • -
          • A: You can change the color scheme of Motagua PowerPoint Template by choosing from 15 color schemes that are already provided in the template, or by creating your own color scheme using the color palette tool.
          • -
          • Q: How can I edit the data charts and infographics in Motagua PowerPoint Template?
          • -
          • A: You can edit the data charts and infographics in Motagua PowerPoint Template by using Excel or PowerPoint tools. You can also change the colors and styles of the data charts and infographics to match your theme or brand.
          • -
          • Q: How can I add animations and transitions to Motagua PowerPoint Template?
          • -
          • A: You can add animations and transitions to Motagua PowerPoint Template by choosing the animated version of the template, which has animations and transitions already applied to the slides. You can also customize the speed and duration of the animations according to your liking.
          • -
          • Q: How can I get support for Motagua PowerPoint Template?
          • -
          • A: You can get support for Motagua PowerPoint Template by contacting Jetfabrik, the creator of the template, through their profile page on GraphicRiver. You can also check their documentation file that is included in the template folder.
          • -
          - - -

          b2dd77e56b
          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Tax Program For Mac.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Tax Program For Mac.md deleted file mode 100644 index 87d68f23b0b7509cba75f2165a75830f0ac548ae..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Tax Program For Mac.md +++ /dev/null @@ -1,19 +0,0 @@ - -

          How to Choose the Best Tax Program For Mac in 2023

          -

If you are a Mac user and you need to file your taxes, you might be wondering what the best tax programs for Mac are in 2023. There are many options available, but not all of them are compatible with Mac or offer the features and support you need. Here are some factors to consider when choosing a tax program for Mac:

          -
            -
          • Compatibility: Make sure the tax program you choose is compatible with your Mac operating system and hardware. Some tax programs may require a specific version of macOS or a certain amount of memory or disk space. You can check the system requirements on the tax program's website or contact their customer service.
          • -
          • Features: Depending on your tax situation, you may need a tax program that offers certain features, such as importing data from previous years, supporting multiple forms and schedules, calculating deductions and credits, providing audit protection, etc. Compare the features of different tax programs and see which one meets your needs.
          • -
          • Support: Filing taxes can be confusing and stressful, so you may need some help along the way. Look for a tax program that offers various types of support, such as live chat, phone, email, online community, etc. You may also want to check the availability and quality of the support before you buy the tax program.
          • -
          • Price: Tax programs vary in price depending on the level of service and features they offer. Some tax programs are free for simple returns, while others charge a fee for more complex returns or additional services. You may also have to pay extra for state filing or e-filing. Compare the prices of different tax programs and see which one fits your budget.
          • -
          -

          To help you narrow down your choices, here are some of the best tax programs for Mac in 2023 based on customer reviews and ratings:

          -

          Tax Program For Mac


          Download Zip ——— https://urlcod.com/2uIbgo



          -
            -
          1. TurboTax: TurboTax is one of the most popular and trusted tax programs for Mac. It offers more than 350 tax deductions and credits, keeps you updated with the latest tax laws, checks your entries for accuracy, and guarantees your maximum refund. You can choose from different versions depending on your needs, such as Free Edition, Deluxe, Premier, Self-Employed, etc. You can also get live help from a tax expert or have them do your taxes for you with TurboTax Live.
          2. -
          3. H&R Block: H&R Block is another well-known and reliable tax program for Mac. It offers five free federal e-files and unlimited federal preparation, guidance on maximizing mortgage interest and real estate tax deductions, free live product help via chat, and expert review before you file. You can choose from different versions depending on your needs, such as Basic, Deluxe + State, Premium + State, Self-Employed + State, etc. You can also get in-person assistance at one of their offices if you need it.
          4. -
5. TaxAct: TaxAct is a more affordable option for filing your taxes on Mac. It saves you time by importing last year's data or capturing a photo of your W-2 with your phone. It gives you full access to the latest IRS and state tax reform changes, helps you enter your charitable donations with Donation Assistant, and provides you with a free BluPrint financial assessment. You can choose from different versions depending on your needs, such as Free Edition, Deluxe+, Premier+, Self-Employed+, etc. You can also get dedicated phone support and prioritized service with TaxAct Professional.
          6. -
          7. TaxSlayer: TaxSlayer is another budget-friendly option for filing your taxes on Mac. It offers five versions to suit your needs, such as SimplyFree, Classic, Premium, Military, and Self-Employed. It helps you prepare, print, or e-file your taxes with accuracy and confidence. It has a simple interface that guides you through the tax forms as basic digital information. It also provides IRS assistance and audit defense if you need it.
          8. -
9. Credit Karma: Credit Karma is a free option for filing your taxes on Mac. It can import your tax returns from TurboTax or H&R Block if you have used them before. It supports most common forms and schedules, calculates your refund or liability in real time, checks for errors and audit flags, and guarantees accurate calculations.

            cec2833e83
            -
            -
            \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tests/config/dir1/load_rel.py b/spaces/nikitaPDL2023/assignment4/detectron2/tests/config/dir1/load_rel.py deleted file mode 100644 index 22d10db7fe28ad66819aeb8e991f129301095ea1..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/tests/config/dir1/load_rel.py +++ /dev/null @@ -1,5 +0,0 @@ -# test that load_rel can work -from detectron2.config import LazyConfig - -x = LazyConfig.load_rel("dir1_a.py", "dir1a_dict") -assert x["a"] == 1 diff --git a/spaces/nontGcob/T2E_Vocabulary_Exam_Generator/app.py b/spaces/nontGcob/T2E_Vocabulary_Exam_Generator/app.py deleted file mode 100644 index 829eeb6ee9a5fd62dca8f6b62bc1865eb50f216d..0000000000000000000000000000000000000000 --- a/spaces/nontGcob/T2E_Vocabulary_Exam_Generator/app.py +++ /dev/null @@ -1,82 +0,0 @@ -from flask import Flask, render_template, request, send_file -from model import model - -app = Flask(__name__) - -@app.route('/process', methods=['POST']) -def process(): - T2E_exam = str(request.remote_addr) + ".txt" - text = request.form['text'] - cefr_level = request.form['cefr_level'] - - # Call your Python function here to process the data - output = model(text, cefr_level) - - user_data = {cefr_level: text} - with open("user_data_log.txt", "a") as file: - file.write(str(user_data) + "\n\n") - - # Save the output to a file - count = 0 - max_choice = 4 - - with open(T2E_exam, "w") as file: - file.write("__________ T2E Vocabulary Exam Generator __________\n") - file.write("| Welcome to T2E Vocabulary Exam Generator! |\n") - file.write("| We are glad that our service is useful to you. |\n") - file.write("| |\n") - file.write("| Copyrights 2023, Nutnornont Chamadol |\n") - file.write("| Email: nontc49@gmail.com |\n") - file.write("| Visit https://nontgcob.com to learn more |\n") - file.write("| |\n") - file.write("| Your exam is generated below. |\n") - file.write("| - Happy using T2E Vocabulary Exam Generator! - |\n") - file.write("|__________________________________________________|\n") - file.write("\n") - file.write("If you don't see any text on the Result page, try changing ") - file.write("the CEFR difficulty level selected or choose ALL CEFR level ") - file.write("to make sure you get all the questions that the AI can generate. 
") - file.write("Another possible reason why nothing comes out of the program is ") - file.write("because there is no word that can be turned into an exam, try ") - file.write("putting a longer text passage as an input into the textbox instead.\n") - file.write("Visit https://scribehow.com/shared/How_to_use_T2E_Vocabulary_Exam_Generator__vyYu396JT_qZ0jKATVUqeQ#89cd5f52 for more information.\n") - file.write("\n") - file.write("Note: The first choice of each question is the correct answer, the rest are trick choices!\n") - file.write("\n") - file.write("\n") - - for key, value in output.items(): - vvocab, sentence = key.split(" = ") - # print(f'What does the word "{vvocab}" means in this sentence "{sentence}"?') - with open(T2E_exam, "a") as file: - file.write(f'What is the meaning of the word "{vvocab}" in this sentence "{sentence}"?\n') - - for choice in value: - # print(f"- {choice}") - with open(T2E_exam, "a") as file: - file.write(f"- {choice}\n") - count += 1 - # if count > (max_choice + 1): - # break - with open(T2E_exam, "a") as file: - file.write("\n") - - # print("output:", output) - # print(type(output)) - - return render_template('result.html', output=output, file_path="T2E_exam.txt") - -@app.route('/') -def index(): - return render_template('index.html') - -@app.route('/send') -def get_file(): - T2E_exam = str(request.remote_addr) + ".txt" - return send_file( - str(request.remote_addr) + ".txt", - # download_name = "T2E_exam.txt" - ) - -if __name__ == "__main__": - app.run(host='0.0.0.0', port=7860) diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/data/__init__.py b/spaces/oguzakif/video-object-remover/FGT_codes/FGT/data/__init__.py deleted file mode 100644 index 53a0ac88410c95930e2019dbbb2fcb5057eea12d..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/data/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -import logging -import torch -import torch.utils.data -from importlib import import_module - - -def create_dataloader(phase, dataset, dataset_opt, opt=None, sampler=None): - logger = logging.getLogger('base') - if phase == 'train': - num_workers = dataset_opt['n_workers'] * opt['world_size'] - batch_size = dataset_opt['batch_size'] - if sampler is not None: - logger.info('N_workers: {}, batch_size: {} DDP train dataloader has been established'.format(num_workers, - batch_size)) - return torch.utils.data.DataLoader(dataset, batch_size=batch_size, - num_workers=num_workers, sampler=sampler, - pin_memory=True) - else: - logger.info('N_workers: {}, batch_size: {} train dataloader has been established'.format(num_workers, - batch_size)) - return torch.utils.data.DataLoader(dataset, batch_size=batch_size, - num_workers=num_workers, shuffle=True, - pin_memory=True) - - else: - logger.info( - 'N_workers: {}, batch_size: {} validate/test dataloader has been established'.format( - dataset_opt['n_workers'], - dataset_opt['batch_size'])) - return torch.utils.data.DataLoader(dataset, batch_size=dataset_opt['batch_size'], shuffle=False, - num_workers=dataset_opt['n_workers'], - pin_memory=False) - - -def create_dataset(dataset_opt, dataInfo, phase, dataset_name): - if phase == 'train': - dataset_package = import_module('data.{}'.format(dataset_name)) - dataset = dataset_package.VideoBasedDataset(dataset_opt, dataInfo) - - mode = dataset_opt['mode'] - logger = logging.getLogger('base') - logger.info( - '{} train dataset [{:s} - {:s} - {:s}] is created.'.format(dataset_opt['type'].upper(), - dataset.__class__.__name__, - 
dataset_opt['name'], mode)) - else: # validate and test dataset - return ValueError('No dataset initialized for valdataset') - - return dataset diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/models/utils/network_blocks_2d.py b/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/models/utils/network_blocks_2d.py deleted file mode 100644 index c14b5cb1f47d16c930f395061a2ba3b4fc5a43fa..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/models/utils/network_blocks_2d.py +++ /dev/null @@ -1,186 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np -import torch.nn.functional as F - - -class VanillaConv2d(nn.Module): - def __init__( - self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, - groups=1, bias=True, norm="SN", activation=nn.LeakyReLU(0.2, inplace=True) - ): - - super().__init__() - if padding == -1: - if isinstance(kernel_size, int): - kernel_size = (kernel_size, kernel_size) - if isinstance(dilation, int): - dilation = (dilation, dilation) - self.padding = tuple(((np.array(kernel_size) - 1) * np.array(dilation)) // 2) if padding == -1 else padding - self.featureConv = nn.Conv2d( - in_channels, out_channels, kernel_size, - stride, self.padding, dilation, groups, bias) - - self.norm = norm - if norm == "BN": - self.norm_layer = nn.BatchNorm2d(out_channels) - elif norm == "IN": - self.norm_layer = nn.InstanceNorm2d(out_channels, track_running_stats=True) - elif norm == "SN": - self.norm = None - self.featureConv = nn.utils.spectral_norm(self.featureConv) - else: - self.norm = None - - self.activation = activation - - def forward(self, xs): - out = self.featureConv(xs) - if self.activation: - out = self.activation(out) - if self.norm is not None: - out = self.norm_layer(out) - return out - - -class VanillaDeconv2d(nn.Module): - def __init__( - self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, - groups=1, bias=True, norm="SN", activation=nn.LeakyReLU(0.2, inplace=True), - scale_factor=2 - ): - super().__init__() - self.conv = VanillaConv2d( - in_channels, out_channels, kernel_size, stride, padding, dilation, - groups, bias, norm, activation) - self.scale_factor = scale_factor - - def forward(self, xs): - xs_resized = F.interpolate(xs, scale_factor=self.scale_factor) - return self.conv(xs_resized) - - -class GatedConv2d(VanillaConv2d): - def __init__( - self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, - groups=1, bias=True, norm="SN", activation=nn.LeakyReLU(0.2, inplace=True) - ): - super().__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - groups, bias, norm, activation - ) - self.gatingConv = nn.Conv2d( - in_channels, out_channels, kernel_size, - stride, self.padding, dilation, groups, bias) - if norm == 'SN': - self.gatingConv = nn.utils.spectral_norm(self.gatingConv) - self.sigmoid = nn.Sigmoid() - self.store_gated_values = False - - def gated(self, mask): - # return torch.clamp(mask, -1, 1) - out = self.sigmoid(mask) - if self.store_gated_values: - self.gated_values = out.detach().cpu() - return out - - def forward(self, xs): - gating = self.gatingConv(xs) - feature = self.featureConv(xs) - if self.activation: - feature = self.activation(feature) - out = self.gated(gating) * feature - if self.norm is not None: - out = self.norm_layer(out) - return out - - -class GatedDeconv2d(VanillaDeconv2d): - def __init__( - self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, - 
groups=1, bias=True, norm="SN", activation=nn.LeakyReLU(0.2, inplace=True), - scale_factor=2 - ): - super().__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - groups, bias, norm, activation, scale_factor - ) - self.conv = GatedConv2d( - in_channels, out_channels, kernel_size, stride, padding, dilation, - groups, bias, norm, activation) - - -class PartialConv2d(VanillaConv2d): - def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, - groups=1, bias=True, norm="SN", activation=nn.LeakyReLU(0.2, inplace=True)): - super().__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - groups, bias, norm, activation - ) - self.mask_sum_conv = nn.Conv2d(1, 1, kernel_size, - stride, self.padding, dilation, groups, False) - nn.init.constant_(self.mask_sum_conv.weight, 1.0) - - # mask conv needs not update - for param in self.mask_sum_conv.parameters(): - param.requires_grad = False - - def forward(self, input_tuple): - # http://masc.cs.gmu.edu/wiki/partialconv - # C(X) = W^T * X + b, C(0) = b, D(M) = 1 * M + 0 = sum(M) - # output = W^T* (M .* X) / sum(M) + b = [C(M .* X) – C(0)] / D(M) + C(0), if sum(M) != 0 - # = 0, if sum(M) == 0 - inp, mask = input_tuple - # print(inp.shape, mask.shape) - - # C(M .* X) - output = self.featureConv(mask * inp) - - # C(0) = b - if self.featureConv.bias is not None: - output_bias = self.featureConv.bias.view(1, -1, 1, 1) - else: - output_bias = torch.zeros([1, 1, 1, 1]).to(inp.device) - - # D(M) = sum(M) - with torch.no_grad(): - mask_sum = self.mask_sum_conv(mask) - - # find those sum(M) == 0 - no_update_holes = (mask_sum == 0) - - # Just to prevent devided by 0 - mask_sum_no_zero = mask_sum.masked_fill_(no_update_holes, 1.0) - - # output = [C(M .* X) – C(0)] / D(M) + C(0), if sum(M) != 0 - # = 0, if sum (M) == 0 - output = (output - output_bias) / mask_sum_no_zero + output_bias - output = output.masked_fill_(no_update_holes, 0.0) - - # create a new mask with only 1 or 0 - new_mask = torch.ones_like(mask_sum) - new_mask = new_mask.masked_fill_(no_update_holes, 0.0) - - if self.activation is not None: - output = self.activation(output) - if self.norm is not None: - output = self.norm_layer(output) - return output, new_mask - - -class PartialDeconv2d(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, - groups=1, bias=True, norm="SN", activation=nn.LeakyReLU(0.2, inplace=True), - scale_factor=2): - super().__init__() - self.conv = PartialConv2d( - in_channels, out_channels, kernel_size, stride, padding, dilation, - groups, bias, norm, activation) - self.scale_factor = scale_factor - - def forward(self, input_tuple): - inp, mask = input_tuple - inp_resized = F.interpolate(inp, scale_factor=self.scale_factor) - with torch.no_grad(): - mask_resized = F.interpolate(mask, scale_factor=self.scale_factor) - return self.conv((inp_resized, mask_resized)) diff --git a/spaces/oliver2023/chatgpt-on-wechat/lib/itchat/content.py b/spaces/oliver2023/chatgpt-on-wechat/lib/itchat/content.py deleted file mode 100644 index 41dc0b1b7b5437d63ecd56b2c2fa31e4d9a37e82..0000000000000000000000000000000000000000 --- a/spaces/oliver2023/chatgpt-on-wechat/lib/itchat/content.py +++ /dev/null @@ -1,14 +0,0 @@ -TEXT = 'Text' -MAP = 'Map' -CARD = 'Card' -NOTE = 'Note' -SHARING = 'Sharing' -PICTURE = 'Picture' -RECORDING = VOICE = 'Recording' -ATTACHMENT = 'Attachment' -VIDEO = 'Video' -FRIENDS = 'Friends' -SYSTEM = 'System' - -INCOME_MSG = [TEXT, MAP, CARD, 
NOTE, SHARING, PICTURE, - RECORDING, VOICE, ATTACHMENT, VIDEO, FRIENDS, SYSTEM] diff --git a/spaces/oliver2023/chatgpt-on-wechat/plugins/dungeon/__init__.py b/spaces/oliver2023/chatgpt-on-wechat/plugins/dungeon/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/othnielnaga/stabilityai-StableBeluga-7B/app.py b/spaces/othnielnaga/stabilityai-StableBeluga-7B/app.py deleted file mode 100644 index dff43d7b3c2aab8044a67c058d383fb8a696180b..0000000000000000000000000000000000000000 --- a/spaces/othnielnaga/stabilityai-StableBeluga-7B/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/StableBeluga-7B").launch() \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/models/asymmetricautoencoderkl.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/models/asymmetricautoencoderkl.md deleted file mode 100644 index c7b3ee9b5155914240ce865c309b05bcf5206a30..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/models/asymmetricautoencoderkl.md +++ /dev/null @@ -1,55 +0,0 @@ -# AsymmetricAutoencoderKL - -Improved larger variational autoencoder (VAE) model with KL loss for inpainting task: [Designing a Better Asymmetric VQGAN for StableDiffusion](https://arxiv.org/abs/2306.04632) by Zixin Zhu, Xuelu Feng, Dongdong Chen, Jianmin Bao, Le Wang, Yinpeng Chen, Lu Yuan, Gang Hua. - -The abstract from the paper is: - -*StableDiffusion is a revolutionary text-to-image generator that is causing a stir in the world of image generation and editing. Unlike traditional methods that learn a diffusion model in pixel space, StableDiffusion learns a diffusion model in the latent space via a VQGAN, ensuring both efficiency and quality. It not only supports image generation tasks, but also enables image editing for real images, such as image inpainting and local editing. However, we have observed that the vanilla VQGAN used in StableDiffusion leads to significant information loss, causing distortion artifacts even in non-edited image regions. To this end, we propose a new asymmetric VQGAN with two simple designs. Firstly, in addition to the input from the encoder, the decoder contains a conditional branch that incorporates information from task-specific priors, such as the unmasked image region in inpainting. Secondly, the decoder is much heavier than the encoder, allowing for more detailed recovery while only slightly increasing the total inference cost. The training cost of our asymmetric VQGAN is cheap, and we only need to retrain a new asymmetric decoder while keeping the vanilla VQGAN encoder and StableDiffusion unchanged. Our asymmetric VQGAN can be widely used in StableDiffusion-based inpainting and local editing methods. Extensive experiments demonstrate that it can significantly improve the inpainting and editing performance, while maintaining the original text-to-image capability. The code is available at https://github.com/buxiangzhiren/Asymmetric_VQGAN* - -Evaluation results can be found in section 4.1 of the original paper. 
- -## Available checkpoints - -* [https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-1-5](https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-1-5) -* [https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-2](https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-2) - -## Example Usage - -```python -from io import BytesIO -from PIL import Image -import requests -from diffusers import AsymmetricAutoencoderKL, StableDiffusionInpaintPipeline - - -def download_image(url: str) -> Image.Image: - response = requests.get(url) - return Image.open(BytesIO(response.content)).convert("RGB") - - -prompt = "a photo of a person" -img_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/celeba_hq_256.png" -mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png" - -image = download_image(img_url).resize((256, 256)) -mask_image = download_image(mask_url).resize((256, 256)) - -pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") -pipe.vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") -pipe.to("cuda") - -image = pipe(prompt=prompt, image=image, mask_image=mask_image).images[0] -image.save("image.jpeg") -``` - -## AsymmetricAutoencoderKL - -[[autodoc]] models.autoencoder_asym_kl.AsymmetricAutoencoderKL - -## AutoencoderKLOutput - -[[autodoc]] models.autoencoder_kl.AutoencoderKLOutput - -## DecoderOutput - -[[autodoc]] models.vae.DecoderOutput diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/adapter.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/adapter.md deleted file mode 100644 index 4c7415ddb02b43d030c429713bdfff60ba69c624..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/adapter.md +++ /dev/null @@ -1,258 +0,0 @@ - - -# Text-to-Image Generation with Adapter Conditioning - -## Overview - -[T2I-Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.08453) by Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhongang Qi, Ying Shan, Xiaohu Qie. - -Using the pretrained models we can provide control images (for example, a depth map) to control Stable Diffusion text-to-image generation so that it follows the structure of the depth image and fills in the details. - -The abstract of the paper is the following: - -*The incredible generative ability of large-scale text-to-image (T2I) models has demonstrated strong power of learning complex structures and meaningful semantics. However, relying solely on text prompts cannot fully take advantage of the knowledge learned by the model, especially when flexible and accurate structure control is needed. In this paper, we aim to ``dig out" the capabilities that T2I models have implicitly learned, and then explicitly use them to control the generation more granularly. Specifically, we propose to learn simple and small T2I-Adapters to align internal knowledge in T2I models with external control signals, while freezing the original large T2I models. In this way, we can train various adapters according to different conditions, and achieve rich control and editing effects. 
Further, the proposed T2I-Adapters have attractive properties of practical value, such as composability and generalization ability. Extensive experiments demonstrate that our T2I-Adapter has promising generation quality and a wide range of applications.* - -This model was contributed by the community contributor [HimariO](https://github.com/HimariO) ❤️ . - -## Available Pipelines: - -| Pipeline | Tasks | Demo -|---|---|:---:| -| [StableDiffusionAdapterPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_adapter.py) | *Text-to-Image Generation with T2I-Adapter Conditioning* | - -| [StableDiffusionXLAdapterPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_xl_adapter.py) | *Text-to-Image Generation with T2I-Adapter Conditioning on StableDiffusion-XL* | - - -## Usage example with the base model of StableDiffusion-1.4/1.5 - -In the following we give a simple example of how to use a *T2IAdapter* checkpoint with Diffusers for inference based on StableDiffusion-1.4/1.5. -All adapters use the same pipeline. - - 1. Images are first converted into the appropriate *control image* format. - 2. The *control image* and *prompt* are passed to the [`StableDiffusionAdapterPipeline`]. - -Let's have a look at a simple example using the [Color Adapter](https://huggingface.co/TencentARC/t2iadapter_color_sd14v1). - -```python -from diffusers.utils import load_image - -image = load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_ref.png") -``` - -![img](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_ref.png) - - -Then we can create our color palette by simply resizing it to 8 by 8 pixels and then scaling it back to original size. - -```python -from PIL import Image - -color_palette = image.resize((8, 8)) -color_palette = color_palette.resize((512, 512), resample=Image.Resampling.NEAREST) -``` - -Let's take a look at the processed image. - -![img](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_palette.png) - - -Next, create the adapter pipeline - -```py -import torch -from diffusers import StableDiffusionAdapterPipeline, T2IAdapter - -adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_color_sd14v1", torch_dtype=torch.float16) -pipe = StableDiffusionAdapterPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", - adapter=adapter, - torch_dtype=torch.float16, -) -pipe.to("cuda") -``` - -Finally, pass the prompt and control image to the pipeline - -```py -# fix the random seed, so you will get the same result as the example -generator = torch.manual_seed(7) - -out_image = pipe( - "At night, glowing cubes in front of the beach", - image=color_palette, - generator=generator, -).images[0] -``` - -![img](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_output.png) - -## Usage example with the base model of StableDiffusion-XL - -In the following we give a simple example of how to use a *T2IAdapter* checkpoint with Diffusers for inference based on StableDiffusion-XL. -All adapters use the same pipeline. - - 1. Images are first downloaded into the appropriate *control image* format. - 2. The *control image* and *prompt* are passed to the [`StableDiffusionXLAdapterPipeline`]. 
- -Let's have a look at a simple example using the [Sketch Adapter](https://huggingface.co/Adapter/t2iadapter/tree/main/sketch_sdxl_1.0). - -```python -from diffusers.utils import load_image - -sketch_image = load_image("https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png").convert("L") -``` - -![img](https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png) - -Then, create the adapter pipeline - -```py -import torch -from diffusers import ( - T2IAdapter, - StableDiffusionXLAdapterPipeline, - DDPMScheduler -) -from diffusers.models.unet_2d_condition import UNet2DConditionModel - -model_id = "stabilityai/stable-diffusion-xl-base-1.0" -adapter = T2IAdapter.from_pretrained("Adapter/t2iadapter", subfolder="sketch_sdxl_1.0",torch_dtype=torch.float16, adapter_type="full_adapter_xl") -scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") - -pipe = StableDiffusionXLAdapterPipeline.from_pretrained( - model_id, adapter=adapter, safety_checker=None, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler -) - -pipe.to("cuda") -``` - -Finally, pass the prompt and control image to the pipeline - -```py -# fix the random seed, so you will get the same result as the example -generator = torch.Generator().manual_seed(42) - -sketch_image_out = pipe( - prompt="a photo of a dog in real world, high quality", - negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality", - image=sketch_image, - generator=generator, - guidance_scale=7.5 -).images[0] -``` - -![img](https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch_output.png) - -## Available checkpoints - -Non-diffusers checkpoints can be found under [TencentARC/T2I-Adapter](https://huggingface.co/TencentARC/T2I-Adapter/tree/main/models). - -### T2I-Adapter with Stable Diffusion 1.4 - -| Model Name | Control Image Overview| Control Image Example | Generated Image Example | -|---|---|---|---| -|[TencentARC/t2iadapter_color_sd14v1](https://huggingface.co/TencentARC/t2iadapter_color_sd14v1)
            *Trained with spatial color palette* | An image with an 8x8 color palette.||| -|[TencentARC/t2iadapter_canny_sd14v1](https://huggingface.co/TencentARC/t2iadapter_canny_sd14v1)
            *Trained with canny edge detection* | A monochrome image with white edges on a black background.||| -|[TencentARC/t2iadapter_sketch_sd14v1](https://huggingface.co/TencentARC/t2iadapter_sketch_sd14v1)
            *Trained with [PidiNet](https://github.com/zhuoinoulu/pidinet) edge detection* | A hand-drawn monochrome image with white outlines on a black background.||| -|[TencentARC/t2iadapter_depth_sd14v1](https://huggingface.co/TencentARC/t2iadapter_depth_sd14v1)
            *Trained with Midas depth estimation* | A grayscale image with black representing deep areas and white representing shallow areas.||| -|[TencentARC/t2iadapter_openpose_sd14v1](https://huggingface.co/TencentARC/t2iadapter_openpose_sd14v1)
            *Trained with OpenPose bone image* | An [OpenPose bone](https://github.com/CMU-Perceptual-Computing-Lab/openpose) image.||| -|[TencentARC/t2iadapter_keypose_sd14v1](https://huggingface.co/TencentARC/t2iadapter_keypose_sd14v1)
            *Trained with mmpose skeleton image* | A [mmpose skeleton](https://github.com/open-mmlab/mmpose) image.||| -|[TencentARC/t2iadapter_seg_sd14v1](https://huggingface.co/TencentARC/t2iadapter_seg_sd14v1)
            *Trained with semantic segmentation* | An [custom](https://github.com/TencentARC/T2I-Adapter/discussions/25) segmentation protocol image.|| | -|[TencentARC/t2iadapter_canny_sd15v2](https://huggingface.co/TencentARC/t2iadapter_canny_sd15v2)|| -|[TencentARC/t2iadapter_depth_sd15v2](https://huggingface.co/TencentARC/t2iadapter_depth_sd15v2)|| -|[TencentARC/t2iadapter_sketch_sd15v2](https://huggingface.co/TencentARC/t2iadapter_sketch_sd15v2)|| -|[TencentARC/t2iadapter_zoedepth_sd15v1](https://huggingface.co/TencentARC/t2iadapter_zoedepth_sd15v1)|| -|[Adapter/t2iadapter, subfolder='sketch_sdxl_1.0'](https://huggingface.co/Adapter/t2iadapter/tree/main/sketch_sdxl_1.0)|| -|[Adapter/t2iadapter, subfolder='canny_sdxl_1.0'](https://huggingface.co/Adapter/t2iadapter/tree/main/canny_sdxl_1.0)|| -|[Adapter/t2iadapter, subfolder='openpose_sdxl_1.0'](https://huggingface.co/Adapter/t2iadapter/tree/main/openpose_sdxl_1.0)|| - -## Combining multiple adapters - -[`MultiAdapter`] can be used for applying multiple conditionings at once. - -Here we use the keypose adapter for the character posture and the depth adapter for creating the scene. - -```py -import torch -from PIL import Image -from diffusers.utils import load_image - -cond_keypose = load_image( - "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/keypose_sample_input.png" -) -cond_depth = load_image( - "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/depth_sample_input.png" -) -cond = [[cond_keypose, cond_depth]] - -prompt = ["A man walking in an office room with a nice view"] -``` - -The two control images look as such: - -![img](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/keypose_sample_input.png) -![img](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/depth_sample_input.png) - - -`MultiAdapter` combines keypose and depth adapters. - -`adapter_conditioning_scale` balances the relative influence of the different adapters. - -```py -from diffusers import StableDiffusionAdapterPipeline, MultiAdapter - -adapters = MultiAdapter( - [ - T2IAdapter.from_pretrained("TencentARC/t2iadapter_keypose_sd14v1"), - T2IAdapter.from_pretrained("TencentARC/t2iadapter_depth_sd14v1"), - ] -) -adapters = adapters.to(torch.float16) - -pipe = StableDiffusionAdapterPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", - torch_dtype=torch.float16, - adapter=adapters, -) - -images = pipe(prompt, cond, adapter_conditioning_scale=[0.8, 0.8]) -``` - -![img](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/keypose_depth_sample_output.png) - - -## T2I Adapter vs ControlNet - -T2I-Adapter is similar to [ControlNet](https://huggingface.co/docs/diffusers/main/en/api/pipelines/controlnet). -T2i-Adapter uses a smaller auxiliary network which is only run once for the entire diffusion process. -However, T2I-Adapter performs slightly worse than ControlNet. 
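To make that scheduling difference concrete, here is a minimal, self-contained sketch. It is not diffusers code: both modules, their channel counts, and the 50-step loop are hypothetical stand-ins, chosen only to show that adapter features depend solely on the control image (computed once, reused every step), while a ControlNet-style branch also consumes the evolving latents and therefore runs inside the denoising loop.

```py
# Toy illustration only; not the actual diffusers internals.
import torch
import torch.nn as nn

adapter = nn.Conv2d(3, 8, kernel_size=3, padding=1)             # stand-in for a T2I-Adapter
control_branch = nn.Conv2d(4 + 3, 8, kernel_size=3, padding=1)  # stand-in for a ControlNet block

control_image = torch.randn(1, 3, 64, 64)  # conditioning input (e.g. a sketch or depth map)
latents = torch.randn(1, 4, 64, 64)        # current diffusion latents

# T2I-Adapter style: the features depend only on the control image,
# so a single forward pass is enough for the whole sampling run.
adapter_residuals = adapter(control_image)

for step in range(50):
    # ControlNet style: the auxiliary network also sees the current latents,
    # so it has to be re-evaluated at every denoising step.
    controlnet_residuals = control_branch(torch.cat([latents, control_image], dim=1))
    # ...a UNet would consume adapter_residuals / controlnet_residuals here...
```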
- -## StableDiffusionAdapterPipeline -[[autodoc]] StableDiffusionAdapterPipeline - - all - - __call__ - - enable_attention_slicing - - disable_attention_slicing - - enable_vae_slicing - - disable_vae_slicing - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention - -## StableDiffusionXLAdapterPipeline -[[autodoc]] StableDiffusionXLAdapterPipeline - - all - - __call__ - - enable_attention_slicing - - disable_attention_slicing - - enable_vae_slicing - - disable_vae_slicing - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/textual_inversion/README.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/textual_inversion/README.md deleted file mode 100644 index 21bca526b5d2e55ee5dd6e4da3858fe66d649f9c..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/textual_inversion/README.md +++ /dev/null @@ -1,144 +0,0 @@ -## Textual Inversion fine-tuning example - -[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images using just 3-5 examples. -The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion. - -## Running on Colab - -Colab for training -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) - -Colab for inference -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) - -## Running locally with PyTorch -### Installing the dependencies - -Before running the scripts, make sure to install the library's training dependencies: - -**Important** - -To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: -```bash -git clone https://github.com/huggingface/diffusers -cd diffusers -pip install . -``` - -Then cd in the example folder and run -```bash -pip install -r requirements.txt -``` - -And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: - -```bash -accelerate config -``` - -### Cat toy example - -First, let's login so that we can upload the checkpoint to the Hub during training: - -```bash -huggingface-cli login -``` - -Now let's get our dataset. For this example we will use some cat images: https://huggingface.co/datasets/diffusers/cat_toy_example . - -Let's first download it locally: - -```py -from huggingface_hub import snapshot_download - -local_dir = "./cat" -snapshot_download("diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes") -``` - -This will be our training data. 
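Optionally, you can sanity-check what landed in `./cat` before launching training. A minimal sketch, assuming only the `./cat` folder created by the `snapshot_download` call above (the extension filter is illustrative):

```py
# Optional sanity check of the downloaded training images in ./cat
from pathlib import Path

from PIL import Image

image_paths = sorted(
    p for p in Path("./cat").iterdir()
    if p.suffix.lower() in {".jpg", ".jpeg", ".png"}
)
print(f"Found {len(image_paths)} training images")
for path in image_paths:
    with Image.open(path) as im:
        print(path.name, im.size, im.mode)
```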
-Now we can launch the training using - -**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** - -```bash -export MODEL_NAME="runwayml/stable-diffusion-v1-5" -export DATA_DIR="./cat" - -accelerate launch textual_inversion.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_data_dir=$DATA_DIR \ - --learnable_property="object" \ - --placeholder_token="" --initializer_token="toy" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=4 \ - --max_train_steps=3000 \ - --learning_rate=5.0e-04 --scale_lr \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --push_to_hub \ - --output_dir="textual_inversion_cat" -``` - -A full training run takes ~1 hour on one V100 GPU. - -**Note**: As described in [the official paper](https://arxiv.org/abs/2208.01618) -only one embedding vector is used for the placeholder token, *e.g.* `""`. -However, one can also add multiple embedding vectors for the placeholder token -to increase the number of fine-tuneable parameters. This can help the model to learn -more complex details. To use multiple embedding vectors, you should define `--num_vectors` -to a number larger than one, *e.g.*: -``` ---num_vectors 5 -``` - -The saved textual inversion vectors will then be larger in size compared to the default case. - -### Inference - -Once you have trained a model using the above command, the inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `placeholder_token` in your prompt. - -```python -from diffusers import StableDiffusionPipeline -import torch - -model_id = "path-to-your-trained-model" -pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") - -prompt = "A backpack" - -image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] - -image.save("cat-backpack.png") -``` - - -## Training with Flax/JAX - -For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script. - -Before running the scripts, make sure to install the library's training dependencies: - -```bash -pip install -U -r requirements_flax.txt -``` - -```bash -export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" -export DATA_DIR="path-to-dir-containing-images" - -python textual_inversion_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_data_dir=$DATA_DIR \ - --learnable_property="object" \ - --placeholder_token="" --initializer_token="toy" \ - --resolution=512 \ - --train_batch_size=1 \ - --max_train_steps=3000 \ - --learning_rate=5.0e-04 --scale_lr \ - --output_dir="textual_inversion_cat" -``` -It should be at least 70% faster than the PyTorch script with the same configuration. - -### Training with xformers: -You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
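For illustration, this is just the PyTorch training command from earlier with that flag appended; every other value is unchanged from the command above.

```bash
accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --push_to_hub \
  --output_dir="textual_inversion_cat" \
  --enable_xformers_memory_efficient_attention
```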
diff --git a/spaces/parkyzh/bingo/src/components/header.tsx b/spaces/parkyzh/bingo/src/components/header.tsx deleted file mode 100644 index dc298b722154d1ac6d7a7e148204605562d6cc58..0000000000000000000000000000000000000000 --- a/spaces/parkyzh/bingo/src/components/header.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import * as React from 'react' -import { UserMenu } from './user-menu' - -export async function Header() { - return ( -
            -
            - -
            -
            - ) -} diff --git a/spaces/patvfb/worldofshares/README.md b/spaces/patvfb/worldofshares/README.md deleted file mode 100644 index 2ca369aac5a0b0b4412cd9d66f067dea1864b3ad..0000000000000000000000000000000000000000 --- a/spaces/patvfb/worldofshares/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Worldofshares -emoji: 🌍 -colorFrom: white -colorTo: blue -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false -duplicated_from: null ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/tool/ganseg.py b/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/tool/ganseg.py deleted file mode 100644 index e6225736d336cf75aedb8a7d7aec1229b497f6a9..0000000000000000000000000000000000000000 --- a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/tool/ganseg.py +++ /dev/null @@ -1,89 +0,0 @@ -''' -A simple tool to generate sample of output of a GAN, -and apply semantic segmentation on the output. -''' - -import torch, numpy, os, argparse, sys, shutil -from PIL import Image -from torch.utils.data import TensorDataset -from netdissect.zdataset import standard_z_sample, z_dataset_for_model -from netdissect.progress import default_progress, verbose_progress -from netdissect.autoeval import autoimport_eval -from netdissect.workerpool import WorkerBase, WorkerPool -from netdissect.nethook import edit_layers, retain_layers -from netdissect.segviz import segment_visualization -from netdissect.segmenter import UnifiedParsingSegmenter -from scipy.io import savemat - -def main(): - parser = argparse.ArgumentParser(description='GAN output segmentation util') - parser.add_argument('--model', type=str, default= - 'netdissect.proggan.from_pth_file("' + - 'models/karras/churchoutdoor_lsun.pth")', - help='constructor for the model to test') - parser.add_argument('--outdir', type=str, default='images', - help='directory for image output') - parser.add_argument('--size', type=int, default=100, - help='number of images to output') - parser.add_argument('--seed', type=int, default=1, - help='seed') - parser.add_argument('--quiet', action='store_true', default=False, - help='silences console output') - #if len(sys.argv) == 1: - # parser.print_usage(sys.stderr) - # sys.exit(1) - args = parser.parse_args() - verbose_progress(not args.quiet) - - # Instantiate the model - model = autoimport_eval(args.model) - - # Make the standard z - z_dataset = z_dataset_for_model(model, size=args.size) - - # Make the segmenter - segmenter = UnifiedParsingSegmenter() - - # Write out text labels - labels, cats = segmenter.get_label_and_category_names() - with open(os.path.join(args.outdir, 'labels.txt'), 'w') as f: - for i, (label, cat) in enumerate(labels): - f.write('%s %s\n' % (label, cat)) - - # Move models to cuda - model.cuda() - - batch_size = 10 - progress = default_progress() - dirname = args.outdir - - with torch.no_grad(): - # Pass 2: now generate images - z_loader = torch.utils.data.DataLoader(z_dataset, - batch_size=batch_size, num_workers=2, - pin_memory=True) - for batch_num, [z] in enumerate(progress(z_loader, - desc='Saving images')): - z = z.cuda() - start_index = batch_num * batch_size - tensor_im = model(z) - byte_im = ((tensor_im + 1) / 2 * 255).clamp(0, 255).byte().permute( - 0, 2, 3, 1).cpu() - seg = segmenter.segment_batch(tensor_im) - for i in range(len(tensor_im)): - index = i + 
start_index - filename = os.path.join(dirname, '%d_img.jpg' % index) - Image.fromarray(byte_im[i].numpy()).save( - filename, optimize=True, quality=100) - filename = os.path.join(dirname, '%d_seg.mat' % index) - savemat(filename, dict(seg=seg[i].cpu().numpy())) - filename = os.path.join(dirname, '%d_seg.png' % index) - Image.fromarray(segment_visualization(seg[i].cpu().numpy(), - tensor_im.shape[2:])).save(filename) - srcdir = os.path.realpath( - os.path.join(os.getcwd(), os.path.dirname(__file__))) - shutil.copy(os.path.join(srcdir, 'lightbox.html'), - os.path.join(dirname, '+lightbox.html')) - -if __name__ == '__main__': - main() diff --git a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/train_onelayer_inv.py b/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/train_onelayer_inv.py deleted file mode 100644 index 95a0649cfc5e86225ae54fe1c650d48d4910cc6f..0000000000000000000000000000000000000000 --- a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/train_onelayer_inv.py +++ /dev/null @@ -1,247 +0,0 @@ -import torch, multiprocessing, itertools, os, shutil, PIL, argparse, numpy -from torch.nn.functional import mse_loss, cosine_similarity -from collections import defaultdict -from . import encoder_net, setting -from . import nethook, zdataset, pbar -from . import proggan, customnet, parallelfolder -from .encoder_loss import cor_square_error -from torchvision import transforms, models -from .pidfile import exit_if_job_done, mark_job_done - -parser = argparse.ArgumentParser() -parser.add_argument('--lr', type=float, help='Learning rate', default=1e-4) -parser.add_argument('--invert_layer', type=int, help='Layer to invert', - default=1) -parser.add_argument('--model', type=str, help='Dataset being modeled', - default='church') -args = parser.parse_args() - -global_seed = 1 -invert_layer = args.invert_layer -expname = 'invert_layer_%d_cse' % invert_layer -expdir = os.path.join('results', args.model, expname) -os.makedirs(expdir, exist_ok=True) - -lr_milestones = [20, 60] # Reduce learning rate after 20 and 60 epochs -if invert_layer == 15 or invert_layer == 2: - lr_milestones = [60, 80] - -def main(): - torch.manual_seed(global_seed) - pbar.print('Training %s' % expdir) - - # Load a progressive GAN - generator = setting.load_proggan(args.model) - # Make a subset model with only some layers. - if invert_layer == 1: - s_maker = IdentityLayer() - else: - s_maker = nethook.subsequence(generator, - last_layer='layer%d' % (invert_layer - 1)) - r_maker = nethook.subsequence(generator, - first_layer='layer%d' % invert_layer, - last_layer='layer%d' % invert_layer) - r_decoder = nethook.subsequence(generator, - first_layer='layer%d' % (invert_layer + 1)) - - # Make an encoder model. - if invert_layer == 1: - encoder = encoder_net.Layer1toZNormEncoder() - else: - channels = [512, # "layer0" is z - 512, 512, 512, 512, 512, 512, 256, 256, 128, 128, 64, 64, 32, 32, 3] - encoder = encoder_net.LayerNormEncoder( - channels[invert_layer], - channels[invert_layer - 1], - stride=(2 if (invert_layer % 2 == 1 and invert_layer < 15) else 1), - skip_conv3=(invert_layer == 2), - skip_pnorm=(invert_layer == 15)) - - # Move models to GPU - for m in [generator, encoder, s_maker, r_maker, r_decoder]: - m.cuda() - - # Set up a training data loader: unending batches of random z. - batch_size = 32 - train_loader = training_loader(generator, batch_size) - - # Test data loader is finite, fixed set of z. 
- test_loader = testing_loader(generator, batch_size) - - # Set up optimizer - set_requires_grad(False, generator, s_maker, r_maker, r_decoder) - learning_rate = args.lr - optimizer = torch.optim.Adam(encoder.parameters(), lr=learning_rate) - scheduler = torch.optim.lr_scheduler.MultiStepLR( - optimizer, milestones=lr_milestones, gamma=0.1) - - epoch_batches = 100 - num_epochs = 100 - # img_elems = 256*256*3 - # rep_elems = 8*8*512 - # alpha = float(rep_elems) / (rep_elems + img_elems) - for epoch, epoch_loader in enumerate(pbar( - epoch_grouper(train_loader, epoch_batches, num_epochs=1+num_epochs), - total=(1+num_epochs))): - # Training loop (for 0th epoch, do no training, just testing) - if epoch > 0: - for (z_batch,) in pbar(epoch_loader, total=epoch_batches): - (z_batch,) = [d.cuda() for d in [z_batch]] - loss = encoder_loss(z_batch, s_maker, r_maker, encoder) - loss.backward() - pbar.post(l=loss.item()) - optimizer.step() - scheduler.step() - # Testing loop - with torch.no_grad(): - losses = defaultdict(float) - count = 0 - for i, (z_batch,) in enumerate(pbar(test_loader)): - (z_batch,) = [d.cuda() for d in [z_batch]] - nb = len(z_batch) - # Some other debugging losses - count += nb - losses['loss'] += nb * ( - encoder_loss(z_batch, s_maker, r_maker, encoder).item()) - for name, mloss in monitor_losses( - z_batch, encoder, s_maker, r_maker, r_decoder).items(): - losses[name] += nb * mloss.item() - if epoch % 10 == 0 and i == 0: - visualize_results(epoch, z_batch, - encoder, s_maker, r_maker, r_decoder) - losses = { name: loss / count for name, loss in losses.items() } - logline = '%d ' % epoch + ' '.join("%s=%4g" % (name, losses[name]) - for name in sorted(losses.keys())) - pbar.print(logline) - with open(os.path.join(expdir, 'log.txt'), 'a') as f: - f.write(logline + '\n') - if epoch % 10 == 0: - save_checkpoint( - epoch=epoch, - state_dict=encoder.state_dict(), - lr=learning_rate, - optimizer=optimizer.state_dict(), - **losses) - if epoch == num_epochs: - break - -def save_checkpoint(**kwargs): - dirname = os.path.join(expdir, 'snapshots') - os.makedirs(dirname, exist_ok=True) - filename = 'epoch_%d.pth.tar' % kwargs['epoch'] - torch.save(kwargs, os.path.join(dirname, filename)) - -def visualize_results(epoch, z_batch, encoder, s_maker, r_maker, r_decoder): - dirname = os.path.join(expdir, 'images') - os.makedirs(dirname, exist_ok=True) - true_s = s_maker(z_batch) - true_r = r_maker(true_s) - image_batch = r_decoder(true_r) - estimated_s = encoder(true_r) - estimated_r = r_maker(estimated_s) - estimated_image = r_decoder(estimated_r) - # For 6 images of the batch, save four images, plus an .npz file. 
- for i in range(min(len(z_batch), 6)): - for name, im in [ - ('epoch_%d_%d_g.png', image_batch), - ('epoch_%d_%d_r.png', estimated_image), - ]: - save_tensor_image(im[i], os.path.join(dirname, name % (epoch, i))) - numpy.savez(os.path.join(dirname, 'epoch_%d_%d.npz' % (epoch, i)), - true_z=z_batch[i].cpu().numpy(), - true_s=true_s[i].cpu().numpy(), - true_r=true_r[i].cpu().numpy(), - estimated_s=estimated_s[i].cpu().numpy(), - estimated_r=estimated_r[i].cpu().numpy()) - shutil.copy(os.path.join(os.path.dirname(__file__), 'lightbox.html'), - os.path.join(dirname, '+lightbox.html')) - -def save_tensor_image(img, filename): - np_data = ((img.permute(1, 2, 0) / 2 + 0.5) * 255).byte().cpu().numpy() - PIL.Image.fromarray(np_data).save(filename) - -def encoder_loss(z_batch, s_maker, r_maker, encoder): - true_s = s_maker(z_batch) - true_r = r_maker(true_s) - recovered_s = encoder(true_r) - recovered_r = r_maker(recovered_s) - return (cor_square_error(true_s, recovered_s) - + 0.01 * cor_square_error(true_r, recovered_r)) - -def monitor_losses(z_batch, encoder, s_maker, r_maker, r_decoder): - true_s = s_maker(z_batch) - true_r = r_maker(true_s) - true_image = r_decoder(true_r) - recovered_s = encoder(true_r) - recovered_r = r_maker(recovered_s) - recovered_image = r_decoder(recovered_r) - return dict( - loss_cs=cor_square_error(true_s, recovered_s), - loss_cr=cor_square_error(true_r, recovered_r), - loss_p=mse_loss(true_image, recovered_image)) - -def training_loader(z_generator, batch_size): - ''' - Returns an infinite generator that runs through randomized z - batches, forever. - ''' - g_epoch = 1 - while True: - z_data = zdataset.z_dataset_for_model( - z_generator, size=10000, seed=g_epoch + global_seed) - dataloader = torch.utils.data.DataLoader( - z_data, - shuffle=False, - batch_size=batch_size, - num_workers=10, - pin_memory=True) - for batch in dataloader: - yield batch - g_epoch += 1 - -def testing_loader(z_generator, batch_size): - ''' - Returns an a short iterator that returns a small set of test data. - ''' - z_data = zdataset.z_dataset_for_model( - z_generator, size=1000, seed=global_seed) - dataloader = torch.utils.data.DataLoader( - z_data, - shuffle=False, - batch_size=batch_size, - num_workers=10, - pin_memory=True) - return dataloader - -def epoch_grouper(loader, epoch_size, num_epochs=None): - ''' - To use with the infinite training loader: groups the training data - batches into epochs of the given size. 
- ''' - it = iter(loader) - epoch = 0 - while True: - chunk_it = itertools.islice(it, epoch_size) - try: - first_el = next(chunk_it) - except StopIteration: - return - yield itertools.chain((first_el,), chunk_it) - epoch += 1 - if num_epochs is not None and epoch >= num_epochs: - return - -def set_requires_grad(requires_grad, *models): - for model in models: - if model is not None: - for param in model.parameters(): - param.requires_grad = requires_grad - -class IdentityLayer(torch.nn.Module): - def forward(self, x): - return x - -if __name__ == '__main__': - exit_if_job_done(expdir) - main() - mark_job_done(expdir) diff --git a/spaces/pinkq/Newbing/src/state/index.ts b/spaces/pinkq/Newbing/src/state/index.ts deleted file mode 100644 index 272106d619c69f124ea8dd0f10872ab4840152d7..0000000000000000000000000000000000000000 --- a/spaces/pinkq/Newbing/src/state/index.ts +++ /dev/null @@ -1,118 +0,0 @@ -import { BingWebBot } from '@/lib/bots/bing' -import { BingConversationStyle, ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { nanoid } from '@/lib/utils' -import { atom } from 'jotai' -import { atomWithImmer } from 'jotai-immer' -import { atomWithStorage } from 'jotai/utils' -import { atomFamily } from 'jotai/utils' -import { atomWithHash, atomWithLocation } from 'jotai-location' - -const initialMessages: ChatMessageModel[] = [ - { author: 'system', text: 'conversation between user and robot', id: '1' }, - { author: 'user', text: '销量最高的 3 种宠物吸尘器有哪些优点和缺点? ', id: '2' }, - { - author: 'bot', text: ` -您好,这是必应。根据网上的搜索结果,我为您找到了以下三款销量最高的宠物吸尘器,以及它们的优点和缺点: - -- **戴森Dyson V10轻量版 Digital Slim Fluffy无线吸尘器**[^1^] [^3^]:这款吸尘器专为宠物家庭设计,拥有强大吸力和多功能吸头,能够有效清除宠物毛发和灰尘。 - - 优点:轻便易用,续航时间长,过滤系统先进,能够有效去除过敏原; - - 缺点:价格较高,噪音较大,需要定期清洁滤网。 -- **飞利浦FC6908/82(W7)无线吸尘器**[^2^] [^4^]:这是一款手持式干吸无线吸尘器,专门为宠物家庭设计。它具有75分钟的超高续航,适合大户型;除螨涡轮刷,可以深度清洁床单被罩,免受螨虫侵害;吸拖一体设计,280ml大水箱,还可以往水箱里加入清洁剂。 - - 优点:清洁性能强劲,操作方便,适用多种场景; - - 缺点:噪音较大,达到84dB(A);尘杯容量较小,只有0.6L;充电时长较长,需要5小时。 -- **必胜(Bissell) 宠物吸尘器**[^2^]:这是一款便携式手持真空吸尘器,专为清理宠物毛发而设计。它配备特殊的橡胶喷嘴和软毛刷头,可以轻松拾取宠物毛发和灰尘;几乎静音运行。 - - 优点:轻巧易用,重量只有1.36kg;电池可充电可更换; - - 缺点:续航时间不稳定,在15-30分钟之间浮动;吸力不够强劲,在地毯或沙发上可能效果不佳;没有过滤系统或过滤网。 - -希望这些信息对您有所帮助。如果您还有其他问题,请随时提问。😊 - `, id: '3' }, - { author: 'user', text: '今天的新闻', id: '4' }, - { - author: 'bot', text: `你好,这是Bing。我根据你的搜索查询“今天的新闻”为你找到了以下信息: - - # 中国新闻 - - 《求是》杂志发表习近平总书记重要文章《深化党和国家机构改革 推进国家治理体系和治理能力现代化》[^1^] - - 梦之队第5金! 
全红婵陈芋汐女双10米台夺冠[^1^] - - 中央气象台7月16日18时发布台风橙色预警[^1^] - - 贵州石阡:暑期旅游带动乡村振兴[^1^] - - 激活大科学装置的“人才红利”[^1^] - - 聚才用才留才 让希望的田野成为智慧的田野[^1^] - - 科学家发现粮食系统增效可有效支撑我国碳中和[^1^] - - 成都以赛为媒提升城市美誉度[^1^] - - 香江观澜:“抢人才”成效显著 香港竞争力依旧[^1^] - - 浙江建德:新安江上享清凉[^1^] - - 定向资金持续精准滴灌 高质量发展力量不断积累[^1^] - - 企业信心恢复加大中长期投资 为稳增长奠定基础[^1^] - - 武汉渡江节开赛 千名“泳士”竞渡长江[^1^] - - 金融更加重视对实体经济支持 降低融资成本提振市场信心[^1^] - - 大运来了丨成都迎大运 全民健身动起来[^1^] - - 藏在高校里的宝藏博物馆[^1^] - - 中国汽车工业用70年赢得三个“全球第一”[^1^] - - 辽宁阜新市委常委、副市长赖化平涉嫌严重违纪违法接受审查调查[^1^] - - # 国际新闻 - - 联合国秘书长:可持续发展面临风险 各国需加强行动[^2^] - - 国际航运业加快绿色转型[^2^] - - 美企反对收紧对华芯片出口限制[^2^] - - 欧洲加大气候科技领域投资[^2^] - - 中企助力丹麦发展清洁能源[^2^] - - 中国代表呼吁国际社会共同努力防止乌克兰局势失控[^2^] - - 中国和阿尔及利亚共同构建新型国际关系典范[^2^] - - 以上信息仅供参考,具体详情请点击以下链接查看: - - [1] [中国新闻_央视网(cctv.com)](https://news.cctv.com/china/) - [2] [人民网_网上的人民日报](http://www.people.com.cn/)`, id: '5' }, - { author: 'user', text: '写一个快排', id: '6' }, - { author: 'bot', text: "你好,这是Bing。我可以帮你用python写一个快排。快速排序是一种基于分治思想的排序算法,它的基本思想是:从待排序的数组中选择一个基准元素,将数组分为两个子数组,使得左边的子数组中的元素都小于或等于基准元素,右边的子数组中的元素都大于或等于基准元素,然后对两个子数组递归地进行快速排序,最后合并结果。下面是用python实现的快速排序的代码:\n\n```python\n# 定义一个函数,用于交换数组中的两个元素\ndef swap(arr, i, j):\n temp = arr[i]\n arr[i] = arr[j]\n arr[j] = temp\n\n# 定义一个函数,用于对数组的一段区间进行划分\ndef partition(arr, low, high):\n # 选择最右边的元素作为基准元素\n pivot = arr[high]\n # 初始化一个指针,指向左边区间的最后一个位置\n i = low - 1\n # 遍历左边区间中的每个元素\n for j in range(low, high):\n # 如果当前元素小于或等于基准元素\n if arr[j] <= pivot:\n # 将指针右移一位\n i += 1\n # 将指针所指的元素与当前元素交换\n swap(arr, i, j)\n # 将基准元素与指针右边的第一个元素交换\n swap(arr, i + 1, high)\n # 返回基准元素的最终位置\n return i + 1\n\n# 定义一个函数,用于对数组进行快速排序\ndef quick_sort(arr, low, high):\n # 如果区间长度大于1\n if low < high:\n # 对区间进行划分,得到基准元素的位置\n p = partition(arr, low, high)\n # 对左边子区间进行快速排序\n quick_sort(arr, low, p - 1)\n # 对右边子区间进行快速排序\n quick_sort(arr, p + 1, high)\n\n# 测试代码\narr = [5, 3, 7, 2, 9, 1, 4, 6, 8]\nprint(\"原始数组:\", arr)\nquick_sort(arr, 0, len(arr) - 1)\nprint(\"排序后的数组:\", arr)\n```", id: '7' }, - { - author: 'bot', text: "好的,我会尝试画一只猫。\n > ![一只猫](https://tse2.mm.bing.net/th/id/OIG.jz34V0PNVkPC229h9spV?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse1.mm.bing.net/th/id/OIG.6g7d.XLZMP_iwAByLhvo?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse2.mm.bing.net/th/id/OIG.iAxF4ekekYn7sZw9SmU6?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse4.mm.bing.net/th/id/OIG.qDnzeSKzUCeJcrBqc5mX?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)", - id: '8' - } -] - -export const GreetMessages = [ - '谢谢你! 知道你什么时候准备好继续前进总是很有帮助的。我现在能为你回答什么问题?', - '重新开始总是很棒。问我任何问题!', - '当然,我很乐意重新开始。我现在可以为你提供哪些帮助?', - '当然,我已准备好进行新的挑战。我现在可以为你做什么?', - '很好,让我们来更改主题。你在想什么?', - '不用担心,我很高兴尝试一些新内容。我现在可以为你回答什么问题?', - '好的,我准备好了!感谢重置。我们应该了解哪些内容?', - '感谢刷新!你有新的话题吗?', - '明白了,让我们重新开始。接下来应该讨论什么?', - '下一步!我可以为你做什么?', - '好的,我已准备好新话题。我们应该一起了解哪些内容?' 
-] - -export const bingConversationStyleAtom = atomWithStorage('bingConversationStyle', BingConversationStyle.Creative, undefined, { unstable_getOnInit: true }) -export const voiceAtom = atomWithStorage('enableTTS', false, undefined, { unstable_getOnInit: true }) - -type Param = { botId: BotId; page: string } - -const createBotInstance = () => { - return new BingWebBot({ - cookie: ' ', - ua: ' ', - }) -} - -export const chatFamily = atomFamily( - (param: Param) => { - return atomWithImmer({ - botId: param.botId, - bot: createBotInstance(), - messages: [] as ChatMessageModel[], - generatingMessageId: '', - abortController: undefined as AbortController | undefined, - conversationId: nanoid(), - }) - }, - (a, b) => a.botId === b.botId && a.page === b.page, -) - -export const hashAtom = atomWithHash('dialog', '') - -export const locationAtom = atomWithLocation() - -export const voiceListenAtom = atom(false) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/fields.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/fields.py deleted file mode 100644 index 9d630f491d9a39644ae65564dac88eb51f0bbe78..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/fields.py +++ /dev/null @@ -1,274 +0,0 @@ -from __future__ import absolute_import - -import email.utils -import mimetypes -import re - -from .packages import six - - -def guess_content_type(filename, default="application/octet-stream"): - """ - Guess the "Content-Type" of a file. - - :param filename: - The filename to guess the "Content-Type" of using :mod:`mimetypes`. - :param default: - If no "Content-Type" can be guessed, default to `default`. - """ - if filename: - return mimetypes.guess_type(filename)[0] or default - return default - - -def format_header_param_rfc2231(name, value): - """ - Helper function to format and quote a single header parameter using the - strategy defined in RFC 2231. - - Particularly useful for header parameters which might contain - non-ASCII values, like file names. This follows - `RFC 2388 Section 4.4 `_. - - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as ``bytes`` or `str``. - :ret: - An RFC-2231-formatted unicode string. - """ - if isinstance(value, six.binary_type): - value = value.decode("utf-8") - - if not any(ch in value for ch in '"\\\r\n'): - result = u'%s="%s"' % (name, value) - try: - result.encode("ascii") - except (UnicodeEncodeError, UnicodeDecodeError): - pass - else: - return result - - if six.PY2: # Python 2: - value = value.encode("utf-8") - - # encode_rfc2231 accepts an encoded string and returns an ascii-encoded - # string in Python 2 but accepts and returns unicode strings in Python 3 - value = email.utils.encode_rfc2231(value, "utf-8") - value = "%s*=%s" % (name, value) - - if six.PY2: # Python 2: - value = value.decode("utf-8") - - return value - - -_HTML5_REPLACEMENTS = { - u"\u0022": u"%22", - # Replace "\" with "\\". - u"\u005C": u"\u005C\u005C", -} - -# All control characters from 0x00 to 0x1F *except* 0x1B. 
-_HTML5_REPLACEMENTS.update( - { - six.unichr(cc): u"%{:02X}".format(cc) - for cc in range(0x00, 0x1F + 1) - if cc not in (0x1B,) - } -) - - -def _replace_multiple(value, needles_and_replacements): - def replacer(match): - return needles_and_replacements[match.group(0)] - - pattern = re.compile( - r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()]) - ) - - result = pattern.sub(replacer, value) - - return result - - -def format_header_param_html5(name, value): - """ - Helper function to format and quote a single header parameter using the - HTML5 strategy. - - Particularly useful for header parameters which might contain - non-ASCII values, like file names. This follows the `HTML5 Working Draft - Section 4.10.22.7`_ and matches the behavior of curl and modern browsers. - - .. _HTML5 Working Draft Section 4.10.22.7: - https://w3c.github.io/html/sec-forms.html#multipart-form-data - - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as ``bytes`` or `str``. - :ret: - A unicode string, stripped of troublesome characters. - """ - if isinstance(value, six.binary_type): - value = value.decode("utf-8") - - value = _replace_multiple(value, _HTML5_REPLACEMENTS) - - return u'%s="%s"' % (name, value) - - -# For backwards-compatibility. -format_header_param = format_header_param_html5 - - -class RequestField(object): - """ - A data container for request body parameters. - - :param name: - The name of this request field. Must be unicode. - :param data: - The data/value body. - :param filename: - An optional filename of the request field. Must be unicode. - :param headers: - An optional dict-like object of headers to initially use for the field. - :param header_formatter: - An optional callable that is used to encode and format the headers. By - default, this is :func:`format_header_param_html5`. - """ - - def __init__( - self, - name, - data, - filename=None, - headers=None, - header_formatter=format_header_param_html5, - ): - self._name = name - self._filename = filename - self.data = data - self.headers = {} - if headers: - self.headers = dict(headers) - self.header_formatter = header_formatter - - @classmethod - def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5): - """ - A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. - - Supports constructing :class:`~urllib3.fields.RequestField` from - parameter of key/value strings AND key/filetuple. A filetuple is a - (filename, data, MIME type) tuple where the MIME type is optional. - For example:: - - 'foo': 'bar', - 'fakefile': ('foofile.txt', 'contents of foofile'), - 'realfile': ('barfile.txt', open('realfile').read()), - 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), - 'nonamefile': 'contents of nonamefile field', - - Field names and filenames must be unicode. - """ - if isinstance(value, tuple): - if len(value) == 3: - filename, data, content_type = value - else: - filename, data = value - content_type = guess_content_type(filename) - else: - filename = None - content_type = None - data = value - - request_param = cls( - fieldname, data, filename=filename, header_formatter=header_formatter - ) - request_param.make_multipart(content_type=content_type) - - return request_param - - def _render_part(self, name, value): - """ - Overridable helper function to format a single header parameter. By - default, this calls ``self.header_formatter``. 
- - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as a unicode string. - """ - - return self.header_formatter(name, value) - - def _render_parts(self, header_parts): - """ - Helper function to format and quote a single header. - - Useful for single headers that are composed of multiple items. E.g., - 'Content-Disposition' fields. - - :param header_parts: - A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format - as `k1="v1"; k2="v2"; ...`. - """ - parts = [] - iterable = header_parts - if isinstance(header_parts, dict): - iterable = header_parts.items() - - for name, value in iterable: - if value is not None: - parts.append(self._render_part(name, value)) - - return u"; ".join(parts) - - def render_headers(self): - """ - Renders the headers for this request field. - """ - lines = [] - - sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"] - for sort_key in sort_keys: - if self.headers.get(sort_key, False): - lines.append(u"%s: %s" % (sort_key, self.headers[sort_key])) - - for header_name, header_value in self.headers.items(): - if header_name not in sort_keys: - if header_value: - lines.append(u"%s: %s" % (header_name, header_value)) - - lines.append(u"\r\n") - return u"\r\n".join(lines) - - def make_multipart( - self, content_disposition=None, content_type=None, content_location=None - ): - """ - Makes this request field into a multipart request field. - - This method overrides "Content-Disposition", "Content-Type" and - "Content-Location" headers to the request parameter. - - :param content_type: - The 'Content-Type' of the request body. - :param content_location: - The 'Content-Location' of the request body. - - """ - self.headers["Content-Disposition"] = content_disposition or u"form-data" - self.headers["Content-Disposition"] += u"; ".join( - [ - u"", - self._render_parts( - ((u"name", self._name), (u"filename", self._filename)) - ), - ] - ) - self.headers["Content-Type"] = content_type - self.headers["Content-Location"] = content_location diff --git a/spaces/plzdontcry/dakubettergpt/src/components/ApiMenu/ApiMenu.tsx b/spaces/plzdontcry/dakubettergpt/src/components/ApiMenu/ApiMenu.tsx deleted file mode 100644 index 95dc852cceadd2adfca3fcc02c68b4cb07bc1503..0000000000000000000000000000000000000000 --- a/spaces/plzdontcry/dakubettergpt/src/components/ApiMenu/ApiMenu.tsx +++ /dev/null @@ -1,105 +0,0 @@ -import React, { useEffect, useState } from 'react'; -import { useTranslation, Trans } from 'react-i18next'; -import useStore from '@store/store'; - -import useHideOnOutsideClick from '@hooks/useHideOnOutsideClick'; - -import PopupModal from '@components/PopupModal'; - -import { availableEndpoints, defaultAPIEndpoint } from '@constants/auth'; - -import DownChevronArrow from '@icon/DownChevronArrow'; - -const ApiMenu = ({ - setIsModalOpen, -}: { - setIsModalOpen: React.Dispatch>; -}) => { - const { t } = useTranslation(['main', 'api']); - - const apiKey = useStore((state) => state.apiKey); - const setApiKey = useStore((state) => state.setApiKey); - - const [_apiKey, _setApiKey] = useState(apiKey || ''); - - const handleSave = () => { - setApiKey(_apiKey); - setIsModalOpen(false); - }; - - return ( - -
            - -
            -
            - {t('apiKey.inputLabel', { ns: 'api' })} -
            - { - _setApiKey(e.target.value); - }} - /> -
            -
            -
            - ); -}; - -const ApiEndpointSelector = ({ - _apiEndpoint, - _setApiEndpoint, -}: { - _apiEndpoint: string; - _setApiEndpoint: React.Dispatch>; -}) => { - const [dropDown, setDropDown, dropDownRef] = useHideOnOutsideClick(); - - return ( -
            - - -
            - ); -}; - -export default ApiMenu; \ No newline at end of file diff --git a/spaces/prasanthntu/who-is-the-hero/app.py b/spaces/prasanthntu/who-is-the-hero/app.py deleted file mode 100644 index 8a69db303b9584cc55bbabba5fd3eaccbe86536f..0000000000000000000000000000000000000000 --- a/spaces/prasanthntu/who-is-the-hero/app.py +++ /dev/null @@ -1,56 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb. - -# %% auto 0 -__all__ = ['learn', 'categories', 'examples', 'intf', 'classify_image'] - -# %% app.ipynb 2 -from fastai.vision.all import * -import gradio as gr - -# Helpers used while building the model -# def is_cat(x): return x[0].isupper() - -# %% app.ipynb 4 -learn = load_learner("who_is_the_hero_model.pkl") - -# %% app.ipynb 6 -categories = learn.dls.vocab -categories = [category.capitalize() for category in categories] -print (f"Categories: {categories}") - -def classify_image(img): - pred, idx, probs = learn.predict(img) - - prediction = dict(zip(categories, map(float, probs))) - print (f"prediction = {prediction}") - - predicted_hero = max(prediction, key=lambda key: prediction[key]) - print (f"predicted_hero = {predicted_hero}") - - if predicted_hero == 'Superman': - alter_ego = "Clark Kent Jr" - elif predicted_hero == "Batman": - alter_ego = "Bruce Wayne" - elif predicted_hero == "Flash": - alter_ego = "Barry Allen" - else: - alter_ego = None - - return prediction, alter_ego - -# %% app.ipynb 9 -examples = [ - 'images/batman.jpg', 'images/batman2.jpg', 'images/batman3.png', - 'images/superman1.jpg', 'images/superman2.jpg', 'images/superman3.jpg', - 'images/flash1.jpg', 'images/flash2.jpg', 'images/flash3.jpg' -] - -intf = gr.Interface( - fn=classify_image, - inputs=gr.Image(shape=(192,192)), - outputs=[gr.Label(label='Predicted output'), gr.Text(label="Alter Ego")], - examples=examples, - title="Who is the 'Super Hero' Classifier", - description="Classifier is fine-tuned on pre-trained **resnet18** model using ~200 images in total" -) -intf.launch(inline=True) diff --git a/spaces/prerna9811/Chord/portaudio/examples/paex_read_write_wire.c b/spaces/prerna9811/Chord/portaudio/examples/paex_read_write_wire.c deleted file mode 100644 index b5046afd613c3d79501199608480fb8e95e1892e..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/examples/paex_read_write_wire.c +++ /dev/null @@ -1,204 +0,0 @@ -/** @file paex_read_write_wire.c - @ingroup examples_src - @brief Tests full duplex blocking I/O by passing input straight to output. - @author Bjorn Roche. XO Audio LLC for Z-Systems Engineering. - @author based on code by: Phil Burk http://www.softsynth.com - @author based on code by: Ross Bencina rossb@audiomulch.com -*/ -/* - * $Id: patest_read_record.c 757 2004-02-13 07:48:10Z rossbencina $ - * - * This program uses the PortAudio Portable Audio Library. 
- * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include -#include -#include "portaudio.h" - -/* #define SAMPLE_RATE (17932) // Test failure to open with this value. */ -#define SAMPLE_RATE (44100) -#define FRAMES_PER_BUFFER (512) -#define NUM_SECONDS (10) -/* #define DITHER_FLAG (paDitherOff) */ -#define DITHER_FLAG (0) - -/* Select sample format. 
*/ -#if 1 -#define PA_SAMPLE_TYPE paFloat32 -#define SAMPLE_SIZE (4) -#define SAMPLE_SILENCE (0.0f) -#define PRINTF_S_FORMAT "%.8f" -#elif 0 -#define PA_SAMPLE_TYPE paInt16 -#define SAMPLE_SIZE (2) -#define SAMPLE_SILENCE (0) -#define PRINTF_S_FORMAT "%d" -#elif 0 -#define PA_SAMPLE_TYPE paInt24 -#define SAMPLE_SIZE (3) -#define SAMPLE_SILENCE (0) -#define PRINTF_S_FORMAT "%d" -#elif 0 -#define PA_SAMPLE_TYPE paInt8 -#define SAMPLE_SIZE (1) -#define SAMPLE_SILENCE (0) -#define PRINTF_S_FORMAT "%d" -#else -#define PA_SAMPLE_TYPE paUInt8 -#define SAMPLE_SIZE (1) -#define SAMPLE_SILENCE (128) -#define PRINTF_S_FORMAT "%d" -#endif - -/*******************************************************************/ -int main(void); -int main(void) -{ - PaStreamParameters inputParameters, outputParameters; - PaStream *stream = NULL; - PaError err; - const PaDeviceInfo* inputInfo; - const PaDeviceInfo* outputInfo; - char *sampleBlock = NULL; - int i; - int numBytes; - int numChannels; - - printf("patest_read_write_wire.c\n"); fflush(stdout); - printf("sizeof(int) = %lu\n", sizeof(int)); fflush(stdout); - printf("sizeof(long) = %lu\n", sizeof(long)); fflush(stdout); - - err = Pa_Initialize(); - if( err != paNoError ) goto error2; - - inputParameters.device = Pa_GetDefaultInputDevice(); /* default input device */ - printf( "Input device # %d.\n", inputParameters.device ); - inputInfo = Pa_GetDeviceInfo( inputParameters.device ); - printf( " Name: %s\n", inputInfo->name ); - printf( " LL: %g s\n", inputInfo->defaultLowInputLatency ); - printf( " HL: %g s\n", inputInfo->defaultHighInputLatency ); - - outputParameters.device = Pa_GetDefaultOutputDevice(); /* default output device */ - printf( "Output device # %d.\n", outputParameters.device ); - outputInfo = Pa_GetDeviceInfo( outputParameters.device ); - printf( " Name: %s\n", outputInfo->name ); - printf( " LL: %g s\n", outputInfo->defaultLowOutputLatency ); - printf( " HL: %g s\n", outputInfo->defaultHighOutputLatency ); - - numChannels = inputInfo->maxInputChannels < outputInfo->maxOutputChannels - ? inputInfo->maxInputChannels : outputInfo->maxOutputChannels; - printf( "Num channels = %d.\n", numChannels ); - - inputParameters.channelCount = numChannels; - inputParameters.sampleFormat = PA_SAMPLE_TYPE; - inputParameters.suggestedLatency = inputInfo->defaultHighInputLatency ; - inputParameters.hostApiSpecificStreamInfo = NULL; - - outputParameters.channelCount = numChannels; - outputParameters.sampleFormat = PA_SAMPLE_TYPE; - outputParameters.suggestedLatency = outputInfo->defaultHighOutputLatency; - outputParameters.hostApiSpecificStreamInfo = NULL; - - /* -- setup -- */ - - err = Pa_OpenStream( - &stream, - &inputParameters, - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - NULL, /* no callback, use blocking API */ - NULL ); /* no callback, so no callback userData */ - if( err != paNoError ) goto error2; - - numBytes = FRAMES_PER_BUFFER * numChannels * SAMPLE_SIZE ; - sampleBlock = (char *) malloc( numBytes ); - if( sampleBlock == NULL ) - { - printf("Could not allocate record array.\n"); - goto error1; - } - memset( sampleBlock, SAMPLE_SILENCE, numBytes ); - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error1; - printf("Wire on. Will run %d seconds.\n", NUM_SECONDS); fflush(stdout); - - for( i=0; i<(NUM_SECONDS*SAMPLE_RATE)/FRAMES_PER_BUFFER; ++i ) - { - // You may get underruns or overruns if the output is not primed by PortAudio. 
- err = Pa_WriteStream( stream, sampleBlock, FRAMES_PER_BUFFER ); - if( err ) goto xrun; - err = Pa_ReadStream( stream, sampleBlock, FRAMES_PER_BUFFER ); - if( err ) goto xrun; - } - printf("Wire off.\n"); fflush(stdout); - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error1; - - free( sampleBlock ); - - Pa_Terminate(); - return 0; - -xrun: - printf("err = %d\n", err); fflush(stdout); - if( stream ) { - Pa_AbortStream( stream ); - Pa_CloseStream( stream ); - } - free( sampleBlock ); - Pa_Terminate(); - if( err & paInputOverflow ) - fprintf( stderr, "Input Overflow.\n" ); - if( err & paOutputUnderflow ) - fprintf( stderr, "Output Underflow.\n" ); - return -2; -error1: - free( sampleBlock ); -error2: - if( stream ) { - Pa_AbortStream( stream ); - Pa_CloseStream( stream ); - } - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return -1; -} diff --git a/spaces/prerna9811/Chord/portaudio/test/patest_sine_srate.c b/spaces/prerna9811/Chord/portaudio/test/patest_sine_srate.c deleted file mode 100644 index d4ce81b26095264fb9822d4421af8962c43566e4..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/test/patest_sine_srate.c +++ /dev/null @@ -1,182 +0,0 @@ -/* - * $Id: patest_sine.c 1097 2006-08-26 08:27:53Z rossb $ - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com/ - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/** @file patest_sine_srate_mac.c - @ingroup test_src - @brief Plays sine waves at 44100 and 48000, - and forces the hardware to change if this is a mac. - Designed for use with CoreAudio. 
- @author Bjorn Roche - @author Ross Bencina - @author Phil Burk -*/ - -#include -#include -#include "portaudio.h" - -#ifdef __APPLE__ -#include "pa_mac_core.h" -#endif - -#define NUM_SECONDS (5) -#define SAMPLE_RATE1 (44100) -#define SAMPLE_RATE2 (48000) -#define FRAMES_PER_BUFFER (64) - -#ifndef M_PI -#define M_PI (3.14159265) -#endif - -#define TABLE_SIZE (200) -typedef struct -{ - float sine[TABLE_SIZE]; - int left_phase; - int right_phase; -} -paTestData; - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). -*/ -static int patestCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - float *out = (float*)outputBuffer; - unsigned long i; - - (void) timeInfo; /* Prevent unused variable warnings. */ - (void) statusFlags; - (void) inputBuffer; - - for( i=0; isine[data->left_phase]; /* left */ - *out++ = data->sine[data->right_phase]; /* right */ - data->left_phase += 1; - if( data->left_phase >= TABLE_SIZE ) data->left_phase -= TABLE_SIZE; - data->right_phase += 3; /* higher pitch so we can distinguish left and right. */ - if( data->right_phase >= TABLE_SIZE ) data->right_phase -= TABLE_SIZE; - } - - return paContinue; -} - -/*******************************************************************/ -int main(void); -int main(void) -{ - PaStreamParameters outputParameters; - PaStream *stream; - PaError err; - paTestData data; -#ifdef __APPLE__ - PaMacCoreStreamInfo macInfo; -#endif - int i; - - /* initialise sinusoidal wavetable */ - for( i=0; idefaultLowOutputLatency; - /** setup host specific info */ -#ifdef __APPLE__ - PaMacCore_SetupStreamInfo( &macInfo, paMacCorePro ); - outputParameters.hostApiSpecificStreamInfo = &macInfo; -#else - printf( "Hardware SR changing not being tested on this platform.\n" ); - outputParameters.hostApiSpecificStreamInfo = NULL; -#endif - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - &outputParameters, - sr, - FRAMES_PER_BUFFER, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - patestCallback, - &data ); - if( err != paNoError ) goto error; - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - printf("Play for %d seconds.\n", NUM_SECONDS ); - Pa_Sleep( NUM_SECONDS * 1000 ); - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - } - - Pa_Terminate(); - printf("Test finished.\n"); - - return err; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/dateutil/parser/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/dateutil/parser/__init__.py deleted file mode 100644 index d174b0e4dcc472999b75e55ebb88af320ae38081..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/dateutil/parser/__init__.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -from ._parser import parse, parser, parserinfo, ParserError -from ._parser 
import DEFAULTPARSER, DEFAULTTZPARSER -from ._parser import UnknownTimezoneWarning - -from ._parser import __doc__ - -from .isoparser import isoparser, isoparse - -__all__ = ['parse', 'parser', 'parserinfo', - 'isoparse', 'isoparser', - 'ParserError', - 'UnknownTimezoneWarning'] - - -### -# Deprecate portions of the private interface so that downstream code that -# is improperly relying on it is given *some* notice. - - -def __deprecated_private_func(f): - from functools import wraps - import warnings - - msg = ('{name} is a private function and may break without warning, ' - 'it will be moved and or renamed in future versions.') - msg = msg.format(name=f.__name__) - - @wraps(f) - def deprecated_func(*args, **kwargs): - warnings.warn(msg, DeprecationWarning) - return f(*args, **kwargs) - - return deprecated_func - -def __deprecate_private_class(c): - import warnings - - msg = ('{name} is a private class and may break without warning, ' - 'it will be moved and or renamed in future versions.') - msg = msg.format(name=c.__name__) - - class private_class(c): - __doc__ = c.__doc__ - - def __init__(self, *args, **kwargs): - warnings.warn(msg, DeprecationWarning) - super(private_class, self).__init__(*args, **kwargs) - - private_class.__name__ = c.__name__ - - return private_class - - -from ._parser import _timelex, _resultbase -from ._parser import _tzparser, _parsetz - -_timelex = __deprecate_private_class(_timelex) -_tzparser = __deprecate_private_class(_tzparser) -_resultbase = __deprecate_private_class(_resultbase) -_parsetz = __deprecated_private_func(_parsetz) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/components/duplicate_button.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/components/duplicate_button.py deleted file mode 100644 index 5213a988feaaf9c855381edfd726d309e67877e2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/components/duplicate_button.py +++ /dev/null @@ -1,83 +0,0 @@ -""" Predefined buttons with bound events that can be included in a gr.Blocks for convenience. """ - -from __future__ import annotations - -from typing import Literal - -from gradio_client.documentation import document, set_documentation_group - -from gradio.components import Button -from gradio.utils import get_space - -set_documentation_group("component") - - -@document() -class DuplicateButton(Button): - """ - Button that triggers a Spaces Duplication, when the demo is on Hugging Face Spaces. Does nothing locally. - Preprocessing: passes the button value as a {str} into the function - Postprocessing: expects a {str} to be returned from a function, which is set as the label of the button - """ - - is_template = True - - def __init__( - self, - value: str = "Duplicate Space", - *, - every: float | None = None, - variant: Literal["primary", "secondary", "stop"] = "secondary", - size: Literal["sm", "lg"] | None = "sm", - icon: str | None = None, - link: str | None = None, - visible: bool = True, - interactive: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - render: bool = True, - scale: int | None = 0, - min_width: int | None = None, - _activate: bool = True, - ): - """ - Parameters: - value: Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component. 
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - variant: 'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button. - size: Size of the button. Can be "sm" or "lg". - icon: URL or path to the icon file to display within the button. If None, no icon will be displayed. - link: URL to open when the button is clicked. If None, no link will be used. - visible: If False, component will be hidden. - interactive: If False, the Button will be in a disabled state. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - """ - super().__init__( - value=value, - every=every, - variant=variant, - size=size, - icon=icon, - link=link, - visible=visible, - interactive=interactive, - elem_id=elem_id, - elem_classes=elem_classes, - render=render, - scale=scale, - min_width=min_width, - ) - if _activate: - self.activate() - - def activate(self): - space_name = get_space() - if space_name is not None: - self.click( - fn=None, - js=f"() => {{ window.open(`https://huggingface.co/spaces/{space_name}?duplicate=true`, '_blank') }}", - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Example-6146f1ae.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Example-6146f1ae.js deleted file mode 100644 index 6e87e9d5da0c585bef67f4fc081f41b6a8c48d43..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Example-6146f1ae.js +++ /dev/null @@ -1,2 +0,0 @@ -import{r as I}from"./file-url-f4206b44.js";const H=new Error("failed to get response body reader"),F=new Error("failed to complete download"),K="Content-Length",Y=async(t,i)=>{const e=await fetch(t);let a;try{const o=parseInt(e.headers.get(K)||"-1"),n=e.body?.getReader();if(!n)throw H;const s=[];let l=0;for(;;){const{done:p,value:x}=await n.read(),v=x?x.length:0;if(p){if(o!=-1&&o!==l)throw F;i&&i({url:t,total:o,received:l,delta:v,done:p});break}s.push(x),l+=v,i&&i({url:t,total:o,received:l,delta:v,done:p})}const m=new Uint8Array(l);let u=0;for(const p of s)m.set(p,u),u+=p.length;a=m.buffer}catch(o){console.log("failed to send download progress event: ",o),a=await e.arrayBuffer(),i&&i({url:t,total:a.byteLength,received:a.byteLength,delta:0,done:!0})}return a},R=async(t,i,e=!1,a)=>{const o=e?await Y(t,a):await(await fetch(t)).arrayBuffer(),n=new Blob([o],{type:i});return URL.createObjectURL(n)};var 
r;(function(t){t.LOAD="LOAD",t.EXEC="EXEC",t.WRITE_FILE="WRITE_FILE",t.READ_FILE="READ_FILE",t.DELETE_FILE="DELETE_FILE",t.RENAME="RENAME",t.CREATE_DIR="CREATE_DIR",t.LIST_DIR="LIST_DIR",t.DELETE_DIR="DELETE_DIR",t.ERROR="ERROR",t.DOWNLOAD="DOWNLOAD",t.PROGRESS="PROGRESS",t.LOG="LOG",t.MOUNT="MOUNT",t.UNMOUNT="UNMOUNT"})(r||(r={}));const J=(()=>{let t=0;return()=>t++})(),Q=new Error("ffmpeg is not loaded, call `await ffmpeg.load()` first"),Z=new Error("called FFmpeg.terminate()");class ${#t=null;#a={};#e={};#o=[];#l=[];loaded=!1;#n=()=>{this.#t&&(this.#t.onmessage=({data:{id:i,type:e,data:a}})=>{switch(e){case r.LOAD:this.loaded=!0,this.#a[i](a);break;case r.MOUNT:case r.UNMOUNT:case r.EXEC:case r.WRITE_FILE:case r.READ_FILE:case r.DELETE_FILE:case r.RENAME:case r.CREATE_DIR:case r.LIST_DIR:case r.DELETE_DIR:this.#a[i](a);break;case r.LOG:this.#o.forEach(o=>o(a));break;case r.PROGRESS:this.#l.forEach(o=>o(a));break;case r.ERROR:this.#e[i](a);break}delete this.#a[i],delete this.#e[i]})};#i=({type:i,data:e},a=[],o)=>this.#t?new Promise((n,s)=>{const l=J();this.#t&&this.#t.postMessage({id:l,type:i,data:e},a),this.#a[l]=n,this.#e[l]=s,o?.addEventListener("abort",()=>{s(new DOMException(`Message # ${l} was aborted`,"AbortError"))},{once:!0})}):Promise.reject(Q);on(i,e){i==="log"?this.#o.push(e):i==="progress"&&this.#l.push(e)}off(i,e){i==="log"?this.#o=this.#o.filter(a=>a!==e):i==="progress"&&(this.#l=this.#l.filter(a=>a!==e))}load=(i={},{signal:e}={})=>(this.#t||(this.#t=new Worker(new URL("https://gradio.s3-us-west-2.amazonaws.com/4.0.2/assets/worker-1779ba70.js",self.location),{type:"module"}),this.#n()),this.#i({type:r.LOAD,data:i},void 0,e));exec=(i,e=-1,{signal:a}={})=>this.#i({type:r.EXEC,data:{args:i,timeout:e}},void 0,a);terminate=()=>{const i=Object.keys(this.#e);for(const e of i)this.#e[e](Z),delete this.#e[e],delete this.#a[e];this.#t&&(this.#t.terminate(),this.#t=null,this.loaded=!1)};writeFile=(i,e,{signal:a}={})=>{const o=[];return e instanceof Uint8Array&&o.push(e.buffer),this.#i({type:r.WRITE_FILE,data:{path:i,data:e}},o,a)};mount=(i,e,a)=>{const o=[];return this.#i({type:r.MOUNT,data:{fsType:i,options:e,mountPoint:a}},o)};unmount=i=>{const e=[];return this.#i({type:r.UNMOUNT,data:{mountPoint:i}},e)};readFile=(i,e="binary",{signal:a}={})=>this.#i({type:r.READ_FILE,data:{path:i,encoding:e}},void 0,a);deleteFile=(i,{signal:e}={})=>this.#i({type:r.DELETE_FILE,data:{path:i}},void 0,e);rename=(i,e,{signal:a}={})=>this.#i({type:r.RENAME,data:{oldPath:i,newPath:e}},void 0,a);createDir=(i,{signal:e}={})=>this.#i({type:r.CREATE_DIR,data:{path:i}},void 0,e);listDir=(i,{signal:e}={})=>this.#i({type:r.LIST_DIR,data:{path:i}},void 0,e);deleteDir=(i,{signal:e}={})=>this.#i({type:r.DELETE_DIR,data:{path:i}},void 0,e)}const 
ii={ez:"application/andrew-inset",aw:"application/applixware",atom:"application/atom+xml",atomcat:"application/atomcat+xml",atomdeleted:"application/atomdeleted+xml",atomsvc:"application/atomsvc+xml",dwd:"application/atsc-dwd+xml",held:"application/atsc-held+xml",rsat:"application/atsc-rsat+xml",bdoc:"application/bdoc",xcs:"application/calendar+xml",ccxml:"application/ccxml+xml",cdfx:"application/cdfx+xml",cdmia:"application/cdmi-capability",cdmic:"application/cdmi-container",cdmid:"application/cdmi-domain",cdmio:"application/cdmi-object",cdmiq:"application/cdmi-queue",cu:"application/cu-seeme",mpd:"application/dash+xml",davmount:"application/davmount+xml",dbk:"application/docbook+xml",dssc:"application/dssc+der",xdssc:"application/dssc+xml",es:"application/ecmascript",ecma:"application/ecmascript",emma:"application/emma+xml",emotionml:"application/emotionml+xml",epub:"application/epub+zip",exi:"application/exi",fdt:"application/fdt+xml",pfr:"application/font-tdpfr",geojson:"application/geo+json",gml:"application/gml+xml",gpx:"application/gpx+xml",gxf:"application/gxf",gz:"application/gzip",hjson:"application/hjson",stk:"application/hyperstudio",ink:"application/inkml+xml",inkml:"application/inkml+xml",ipfix:"application/ipfix",its:"application/its+xml",jar:"application/java-archive",war:"application/java-archive",ear:"application/java-archive",ser:"application/java-serialized-object",class:"application/java-vm",js:"application/javascript",mjs:"application/javascript",json:"application/json",map:"application/json",json5:"application/json5",jsonml:"application/jsonml+json",jsonld:"application/ld+json",lgr:"application/lgr+xml",lostxml:"application/lost+xml",hqx:"application/mac-binhex40",cpt:"application/mac-compactpro",mads:"application/mads+xml",webmanifest:"application/manifest+json",mrc:"application/marc",mrcx:"application/marcxml+xml",ma:"application/mathematica",nb:"application/mathematica",mb:"application/mathematica",mathml:"application/mathml+xml",mbox:"application/mbox",mscml:"application/mediaservercontrol+xml",metalink:"application/metalink+xml",meta4:"application/metalink4+xml",mets:"application/mets+xml",maei:"application/mmt-aei+xml",musd:"application/mmt-usd+xml",mods:"application/mods+xml",m21:"application/mp21",mp21:"application/mp21",mp4s:"application/mp4",m4p:"application/mp4",doc:"application/msword",dot:"application/msword",mxf:"application/mxf",nq:"application/n-quads",nt:"application/n-triples",cjs:"application/node",bin:"application/octet-stream",dms:"application/octet-stream",lrf:"application/octet-stream",mar:"application/octet-stream",so:"application/octet-stream",dist:"application/octet-stream",distz:"application/octet-stream",pkg:"application/octet-stream",bpk:"application/octet-stream",dump:"application/octet-stream",elc:"application/octet-stream",deploy:"application/octet-stream",exe:"application/octet-stream",dll:"application/octet-stream",deb:"application/octet-stream",dmg:"application/octet-stream",iso:"application/octet-stream",img:"application/octet-stream",msi:"application/octet-stream",msp:"application/octet-stream",msm:"application/octet-stream",buffer:"application/octet-stream",oda:"application/oda",opf:"application/oebps-package+xml",ogx:"application/ogg",omdoc:"application/omdoc+xml",onetoc:"application/onenote",onetoc2:"application/onenote",onetmp:"application/onenote",onepkg:"application/onenote",oxps:"application/oxps",relo:"application/p2p-overlay+xml",xer:"application/patch-ops-error+xml",pdf:"application/pdf",pgp:"application/pgp-encrypted",as
c:"application/pgp-signature",sig:"application/pgp-signature",prf:"application/pics-rules",p10:"application/pkcs10",p7m:"application/pkcs7-mime",p7c:"application/pkcs7-mime",p7s:"application/pkcs7-signature",p8:"application/pkcs8",ac:"application/pkix-attr-cert",cer:"application/pkix-cert",crl:"application/pkix-crl",pkipath:"application/pkix-pkipath",pki:"application/pkixcmp",pls:"application/pls+xml",ai:"application/postscript",eps:"application/postscript",ps:"application/postscript",provx:"application/provenance+xml",cww:"application/prs.cww",pskcxml:"application/pskc+xml",raml:"application/raml+yaml",rdf:"application/rdf+xml",owl:"application/rdf+xml",rif:"application/reginfo+xml",rnc:"application/relax-ng-compact-syntax",rl:"application/resource-lists+xml",rld:"application/resource-lists-diff+xml",rs:"application/rls-services+xml",rapd:"application/route-apd+xml",sls:"application/route-s-tsid+xml",rusd:"application/route-usd+xml",gbr:"application/rpki-ghostbusters",mft:"application/rpki-manifest",roa:"application/rpki-roa",rsd:"application/rsd+xml",rss:"application/rss+xml",rtf:"application/rtf",sbml:"application/sbml+xml",scq:"application/scvp-cv-request",scs:"application/scvp-cv-response",spq:"application/scvp-vp-request",spp:"application/scvp-vp-response",sdp:"application/sdp",senmlx:"application/senml+xml",sensmlx:"application/sensml+xml",setpay:"application/set-payment-initiation",setreg:"application/set-registration-initiation",shf:"application/shf+xml",siv:"application/sieve",sieve:"application/sieve",smi:"application/smil+xml",smil:"application/smil+xml",rq:"application/sparql-query",srx:"application/sparql-results+xml",gram:"application/srgs",grxml:"application/srgs+xml",sru:"application/sru+xml",ssdl:"application/ssdl+xml",ssml:"application/ssml+xml",swidtag:"application/swid+xml",tei:"application/tei+xml",teicorpus:"application/tei+xml",tfi:"application/thraud+xml",tsd:"application/timestamped-data",toml:"application/toml",trig:"application/trig",ttml:"application/ttml+xml",ubj:"application/ubjson",rsheet:"application/urc-ressheet+xml",td:"application/urc-targetdesc+xml",vxml:"application/voicexml+xml",wasm:"application/wasm",wgt:"application/widget",hlp:"application/winhlp",wsdl:"application/wsdl+xml",wspolicy:"application/wspolicy+xml",xaml:"application/xaml+xml",xav:"application/xcap-att+xml",xca:"application/xcap-caps+xml",xdf:"application/xcap-diff+xml",xel:"application/xcap-el+xml",xns:"application/xcap-ns+xml",xenc:"application/xenc+xml",xhtml:"application/xhtml+xml",xht:"application/xhtml+xml",xlf:"application/xliff+xml",xml:"application/xml",xsl:"application/xml",xsd:"application/xml",rng:"application/xml",dtd:"application/xml-dtd",xop:"application/xop+xml",xpl:"application/xproc+xml",xslt:"application/xml",xspf:"application/xspf+xml",mxml:"application/xv+xml",xhvml:"application/xv+xml",xvml:"application/xv+xml",xvm:"application/xv+xml",yang:"application/yang",yin:"application/yin+xml",zip:"application/zip","3gpp":"video/3gpp",adp:"audio/adpcm",amr:"audio/amr",au:"audio/basic",snd:"audio/basic",mid:"audio/midi",midi:"audio/midi",kar:"audio/midi",rmi:"audio/midi",mxmf:"audio/mobile-xmf",mp3:"audio/mpeg",m4a:"audio/mp4",mp4a:"audio/mp4",mpga:"audio/mpeg",mp2:"audio/mpeg",mp2a:"audio/mpeg",m2a:"audio/mpeg",m3a:"audio/mpeg",oga:"audio/ogg",ogg:"audio/ogg",spx:"audio/ogg",opus:"audio/ogg",s3m:"audio/s3m",sil:"audio/silk",wav:"audio/wav",weba:"audio/webm",xm:"audio/xm",ttc:"font/collection",otf:"font/otf",ttf:"font/ttf",woff:"font/woff",woff2:"font/woff2",exr:"image/aces",
apng:"image/apng",avif:"image/avif",bmp:"image/bmp",cgm:"image/cgm",drle:"image/dicom-rle",emf:"image/emf",fits:"image/fits",g3:"image/g3fax",gif:"image/gif",heic:"image/heic",heics:"image/heic-sequence",heif:"image/heif",heifs:"image/heif-sequence",hej2:"image/hej2k",hsj2:"image/hsj2",ief:"image/ief",jls:"image/jls",jp2:"image/jp2",jpg2:"image/jp2",jpeg:"image/jpeg",jpg:"image/jpeg",jpe:"image/jpeg",jph:"image/jph",jhc:"image/jphc",jpm:"image/jpm",jpx:"image/jpx",jpf:"image/jpx",jxr:"image/jxr",jxra:"image/jxra",jxrs:"image/jxrs",jxs:"image/jxs",jxsc:"image/jxsc",jxsi:"image/jxsi",jxss:"image/jxss",ktx:"image/ktx",ktx2:"image/ktx2",png:"image/png",btif:"image/prs.btif",pti:"image/prs.pti",sgi:"image/sgi",svg:"image/svg+xml",svgz:"image/svg+xml",t38:"image/t38",tif:"image/tiff",tiff:"image/tiff",tfx:"image/tiff-fx",webp:"image/webp",wmf:"image/wmf","disposition-notification":"message/disposition-notification",u8msg:"message/global",u8dsn:"message/global-delivery-status",u8mdn:"message/global-disposition-notification",u8hdr:"message/global-headers",eml:"message/rfc822",mime:"message/rfc822","3mf":"model/3mf",gltf:"model/gltf+json",glb:"model/gltf-binary",igs:"model/iges",iges:"model/iges",msh:"model/mesh",mesh:"model/mesh",silo:"model/mesh",mtl:"model/mtl",obj:"model/obj",stpz:"model/step+zip",stpxz:"model/step-xml+zip",stl:"model/stl",wrl:"model/vrml",vrml:"model/vrml",x3db:"model/x3d+fastinfoset",x3dbz:"model/x3d+binary",x3dv:"model/x3d-vrml",x3dvz:"model/x3d+vrml",x3d:"model/x3d+xml",x3dz:"model/x3d+xml",appcache:"text/cache-manifest",manifest:"text/cache-manifest",ics:"text/calendar",ifb:"text/calendar",coffee:"text/coffeescript",litcoffee:"text/coffeescript",css:"text/css",csv:"text/csv",html:"text/html",htm:"text/html",shtml:"text/html",jade:"text/jade",jsx:"text/jsx",less:"text/less",markdown:"text/markdown",md:"text/markdown",mml:"text/mathml",mdx:"text/mdx",n3:"text/n3",txt:"text/plain",text:"text/plain",conf:"text/plain",def:"text/plain",list:"text/plain",log:"text/plain",in:"text/plain",ini:"text/plain",dsc:"text/prs.lines.tag",rtx:"text/richtext",sgml:"text/sgml",sgm:"text/sgml",shex:"text/shex",slim:"text/slim",slm:"text/slim",spdx:"text/spdx",stylus:"text/stylus",styl:"text/stylus",tsv:"text/tab-separated-values",t:"text/troff",tr:"text/troff",roff:"text/troff",man:"text/troff",me:"text/troff",ms:"text/troff",ttl:"text/turtle",uri:"text/uri-list",uris:"text/uri-list",urls:"text/uri-list",vcard:"text/vcard",vtt:"text/vtt",yaml:"text/yaml",yml:"text/yaml","3gp":"video/3gpp","3g2":"video/3gpp2",h261:"video/h261",h263:"video/h263",h264:"video/h264",m4s:"video/iso.segment",jpgv:"video/jpeg",jpgm:"image/jpm",mj2:"video/mj2",mjp2:"video/mj2",ts:"video/mp2t",mp4:"video/mp4",mp4v:"video/mp4",mpg4:"video/mp4",mpeg:"video/mpeg",mpg:"video/mpeg",mpe:"video/mpeg",m1v:"video/mpeg",m2v:"video/mpeg",ogv:"video/ogg",qt:"video/quicktime",mov:"video/quicktime",webm:"video/webm"};function ti(t){let i=(""+t).trim().toLowerCase(),e=i.lastIndexOf(".");return ii[~e?i.substring(++e):i]}const Qi=t=>{let i=["B","KB","MB","GB","PB"],e=0;for(;t>1024;)t/=1024,e++;let a=i[e];return t.toFixed(1)+" "+a},Zi=()=>!0;function ei(t,{autoplay:i}){async function e(){i&&await t.play()}return t.addEventListener("loadeddata",e),{destroy(){t.removeEventListener("loadeddata",e)}}}async function $i(){const t=new $,i="https://unpkg.com/@ffmpeg/core@0.12.4/dist/esm";return await t.load({coreURL:await R(`${i}/ffmpeg-core.js`,"text/javascript"),wasmURL:await R(`${i}/ffmpeg-core.wasm`,"application/wasm")}),t}async function 
it(t,i,e,a){try{const o=a.src,n=ti(a.src)||"video/mp4",s=await R(o,n),m=await(await fetch(s)).blob(),u=ai(n)||"mp4",p=`input.${u}`,x=`output.${u}`;await t.writeFile(p,new Uint8Array(await m.arrayBuffer()));let v=["-i",p,"-ss",i.toString(),"-to",e.toString(),"-c:a","copy",x];await t.exec(v);const _=await t.readFile(x);return new Blob([_],{type:`video/${u}`})}catch(o){console.error("Error initializing FFmpeg:",o)}}const ai=t=>({"video/mp4":"mp4","video/webm":"webm","video/ogg":"ogv","video/quicktime":"mov","video/x-msvideo":"avi","video/x-matroska":"mkv","video/mpeg":"mpeg","video/3gpp":"3gp","video/3gpp2":"3g2","video/h261":"h261","video/h263":"h263","video/h264":"h264","video/jpeg":"jpgv","video/jpm":"jpm","video/mj2":"mj2","video/mpv":"mpv","video/vnd.ms-playready.media.pyv":"pyv","video/vnd.uvvu.mp4":"uvu","video/vnd.vivo":"viv","video/x-f4v":"f4v","video/x-fli":"fli","video/x-flv":"flv","video/x-m4v":"m4v","video/x-ms-asf":"asf","video/x-ms-wm":"wm","video/x-ms-wmv":"wmv","video/x-ms-wmx":"wmx","video/x-ms-wvx":"wvx","video/x-sgi-movie":"movie","video/x-smv":"smv"})[t]||null;const{SvelteComponent:oi,action_destroyer:li,add_render_callback:ni,append:pi,assign:T,attr:b,binding_callbacks:si,create_slot:mi,detach:k,element:L,empty:ci,exclude_internal_props:A,get_all_dirty_from_scope:di,get_slot_changes:ri,handle_promise:N,init:ui,insert:w,is_function:fi,listen:h,noop:E,raf:gi,run_all:xi,safe_not_equal:vi,set_data:hi,set_style:_i,space:bi,src_url_equal:U,text:Ei,toggle_class:q,transition_in:C,transition_out:B,update_await_block_branch:ji,update_slot_base:yi}=window.__gradio__svelte__internal,{createEventDispatcher:ki}=window.__gradio__svelte__internal;function wi(t){let i,e=t[20].message+"",a;return{c(){i=L("p"),a=Ei(e),_i(i,"color","red")},m(o,n){w(o,i,n),pi(i,a)},p(o,n){n&16&&e!==(e=o[20].message+"")&&hi(a,e)},i:E,o:E,d(o){o&&k(i)}}}function Ri(t){let i,e,a,o,n,s=!1,l,m=!0,u,p,x,v;const _=t[14].default,g=mi(_,t,t[13],null);function y(){cancelAnimationFrame(l),a.paused||(l=gi(y),s=!0),t[15].call(a)}return{c(){i=L("div"),i.innerHTML='',e=bi(),a=L("video"),g&&g.c(),b(i,"class","overlay svelte-1wkm2e0"),q(i,"hidden",!t[10]),U(a.src,o=t[19])||b(a,"src",o),a.muted=t[5],a.playsInline=t[6],b(a,"preload",t[7]),a.autoplay=t[8],a.controls=t[9],b(a,"data-testid",n=t[12]["data-testid"]),b(a,"crossorigin","anonymous"),b(a,"class","svelte-1wkm2e0"),t[1]===void 
0&&ni(()=>t[16].call(a))},m(c,f){w(c,i,f),w(c,e,f),w(c,a,f),g&&g.m(a,null),t[18](a),p=!0,x||(v=[h(a,"loadeddata",t[11].bind(null,"loadeddata")),h(a,"click",t[11].bind(null,"click")),h(a,"play",t[11].bind(null,"play")),h(a,"pause",t[11].bind(null,"pause")),h(a,"ended",t[11].bind(null,"ended")),h(a,"mouseover",t[11].bind(null,"mouseover")),h(a,"mouseout",t[11].bind(null,"mouseout")),h(a,"focus",t[11].bind(null,"focus")),h(a,"blur",t[11].bind(null,"blur")),h(a,"timeupdate",y),h(a,"durationchange",t[16]),h(a,"play",t[17]),h(a,"pause",t[17]),li(u=ei.call(null,a,{autoplay:t[8]??!1}))],x=!0)},p(c,f){(!p||f&1024)&&q(i,"hidden",!c[10]),g&&g.p&&(!p||f&8192)&&yi(g,_,c,c[13],p?ri(_,c[13],f,null):di(c[13]),null),(!p||f&16&&!U(a.src,o=c[19]))&&b(a,"src",o),(!p||f&32)&&(a.muted=c[5]),(!p||f&64)&&(a.playsInline=c[6]),(!p||f&128)&&b(a,"preload",c[7]),(!p||f&256)&&(a.autoplay=c[8]),(!p||f&512)&&(a.controls=c[9]),(!p||f&4096&&n!==(n=c[12]["data-testid"]))&&b(a,"data-testid",n),!s&&f&1&&!isNaN(c[0])&&(a.currentTime=c[0]),s=!1,f&4&&m!==(m=c[2])&&a[m?"pause":"play"](),u&&fi(u.update)&&f&256&&u.update.call(null,{autoplay:c[8]??!1})},i(c){p||(C(g,c),p=!0)},o(c){B(g,c),p=!1},d(c){c&&(k(i),k(e),k(a)),g&&g.d(c),t[18](null),x=!1,xi(v)}}}function Li(t){return{c:E,m:E,p:E,i:E,o:E,d:E}}function Di(t){let i,e,a,o={ctx:t,current:null,token:null,hasCatch:!0,pending:Li,then:Ri,catch:wi,value:19,error:20,blocks:[,,,]};return N(e=I(t[4]),o),{c(){i=ci(),o.block.c()},m(n,s){w(n,i,s),o.block.m(n,o.anchor=s),o.mount=()=>i.parentNode,o.anchor=i,a=!0},p(n,[s]){t=n,o.ctx=t,s&16&&e!==(e=I(t[4]))&&N(e,o)||ji(o,t,s)},i(n){a||(C(o.block),a=!0)},o(n){for(let s=0;s<3;s+=1){const l=o.blocks[s];B(l)}a=!1},d(n){n&&k(i),o.block.d(n),o.token=null,o=null}}}function Oi(t,i,e){let{$$slots:a={},$$scope:o}=i,{src:n=void 0}=i,{muted:s=void 0}=i,{playsinline:l=void 0}=i,{preload:m=void 0}=i,{autoplay:u=void 0}=i,{controls:p=void 0}=i,{currentTime:x=void 0}=i,{duration:v=void 0}=i,{paused:_=void 0}=i,{node:g=void 0}=i,{processingVideo:y=!1}=i;const c=ki();function f(){x=this.currentTime,e(0,x)}function W(){v=this.duration,e(1,v)}function G(){_=this.paused,e(2,_)}function X(d){si[d?"unshift":"push"](()=>{g=d,e(3,g)})}return t.$$set=d=>{e(12,i=T(T({},i),A(d))),"src"in d&&e(4,n=d.src),"muted"in d&&e(5,s=d.muted),"playsinline"in d&&e(6,l=d.playsinline),"preload"in d&&e(7,m=d.preload),"autoplay"in d&&e(8,u=d.autoplay),"controls"in d&&e(9,p=d.controls),"currentTime"in d&&e(0,x=d.currentTime),"duration"in d&&e(1,v=d.duration),"paused"in d&&e(2,_=d.paused),"node"in d&&e(3,g=d.node),"processingVideo"in d&&e(10,y=d.processingVideo),"$$scope"in d&&e(13,o=d.$$scope)},i=A(i),[x,v,_,g,n,s,l,m,u,p,y,c,i,o,a,f,W,G,X]}class Ii extends oi{constructor(i){super(),ui(this,i,Oi,Di,vi,{src:4,muted:5,playsinline:6,preload:7,autoplay:8,controls:9,currentTime:0,duration:1,paused:2,node:3,processingVideo:10})}}const{SvelteComponent:Ti,add_flush_callback:Ai,append:Ni,attr:Ui,bind:qi,binding_callbacks:zi,create_component:Si,destroy_component:Ci,detach:D,element:M,empty:Bi,init:Mi,insert:O,is_function:z,mount_component:Pi,noop:S,safe_not_equal:Vi,set_data:Wi,text:Gi,toggle_class:j,transition_in:P,transition_out:V}=window.__gradio__svelte__internal;function Xi(t){let i,e;return{c(){i=M("div"),e=Gi(t[2])},m(a,o){O(a,i,o),Ni(i,e)},p(a,o){o&4&&Wi(e,a[2])},i:S,o:S,d(a){a&&D(i)}}}function Hi(t){let i,e,a,o;function n(l){t[6](l)}let s={muted:!0,playsinline:!0,src:t[3]+t[2]};return t[4]!==void 0&&(s.node=t[4]),e=new 
Ii({props:s}),zi.push(()=>qi(e,"node",n)),e.$on("loadeddata",t[5]),e.$on("mouseover",function(){z(t[4].play.bind(t[4]))&&t[4].play.bind(t[4]).apply(this,arguments)}),e.$on("mouseout",function(){z(t[4].pause.bind(t[4]))&&t[4].pause.bind(t[4]).apply(this,arguments)}),{c(){i=M("div"),Si(e.$$.fragment),Ui(i,"class","container svelte-1jmx6y1"),j(i,"table",t[0]==="table"),j(i,"gallery",t[0]==="gallery"),j(i,"selected",t[1])},m(l,m){O(l,i,m),Pi(e,i,null),o=!0},p(l,m){t=l;const u={};m&12&&(u.src=t[3]+t[2]),!a&&m&16&&(a=!0,u.node=t[4],Ai(()=>a=!1)),e.$set(u),(!o||m&1)&&j(i,"table",t[0]==="table"),(!o||m&1)&&j(i,"gallery",t[0]==="gallery"),(!o||m&2)&&j(i,"selected",t[1])},i(l){o||(P(e.$$.fragment,l),o=!0)},o(l){V(e.$$.fragment,l),o=!1},d(l){l&&D(i),Ci(e)}}}function Fi(t){let i,e,a,o;const n=[Hi,Xi],s=[];function l(m,u){return 0}return i=l(),e=s[i]=n[i](t),{c(){e.c(),a=Bi()},m(m,u){s[i].m(m,u),O(m,a,u),o=!0},p(m,[u]){e.p(m,u)},i(m){o||(P(e),o=!0)},o(m){V(e),o=!1},d(m){m&&D(a),s[i].d(m)}}}function Ki(t,i,e){let{type:a}=i,{selected:o=!1}=i,{value:n}=i,{samples_dir:s}=i,l;async function m(){e(4,l.muted=!0,l),e(4,l.playsInline=!0,l),e(4,l.controls=!1,l),l.setAttribute("muted",""),await l.play(),l.pause()}function u(p){l=p,e(4,l)}return t.$$set=p=>{"type"in p&&e(0,a=p.type),"selected"in p&&e(1,o=p.selected),"value"in p&&e(2,n=p.value),"samples_dir"in p&&e(3,s=p.samples_dir)},[a,o,n,s,l,m,u]}class Yi extends Ti{constructor(i){super(),Mi(this,i,Ki,Fi,Vi,{type:0,selected:1,value:2,samples_dir:3})}}const tt=Object.freeze(Object.defineProperty({__proto__:null,default:Yi},Symbol.toStringTag,{value:"Module"}));export{Yi as E,Ii as V,Zi as a,ei as b,tt as c,$i as l,Qi as p,it as t}; -//# sourceMappingURL=Example-6146f1ae.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py deleted file mode 100644 index 49249bc6a8b48383d0a318f7a6a45403ad1b095f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py +++ /dev/null @@ -1,124 +0,0 @@ -import sys -import pytest - -import numpy as np -from numpy.testing import assert_array_equal, IS_PYPY - - -class TestDLPack: - @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") - def test_dunder_dlpack_refcount(self): - x = np.arange(5) - y = x.__dlpack__() - assert sys.getrefcount(x) == 3 - del y - assert sys.getrefcount(x) == 2 - - def test_dunder_dlpack_stream(self): - x = np.arange(5) - x.__dlpack__(stream=None) - - with pytest.raises(RuntimeError): - x.__dlpack__(stream=1) - - def test_strides_not_multiple_of_itemsize(self): - dt = np.dtype([('int', np.int32), ('char', np.int8)]) - y = np.zeros((5,), dtype=dt) - z = y['int'] - - with pytest.raises(BufferError): - np.from_dlpack(z) - - @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") - def test_from_dlpack_refcount(self): - x = np.arange(5) - y = np.from_dlpack(x) - assert sys.getrefcount(x) == 3 - del y - assert sys.getrefcount(x) == 2 - - @pytest.mark.parametrize("dtype", [ - np.bool_, - np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64, - np.float16, np.float32, np.float64, - np.complex64, np.complex128 - ]) - def test_dtype_passthrough(self, dtype): - x = np.arange(5).astype(dtype) - y = np.from_dlpack(x) - - assert y.dtype == x.dtype - assert_array_equal(x, y) - - def test_invalid_dtype(self): - x = 
np.asarray(np.datetime64('2021-05-27')) - - with pytest.raises(BufferError): - np.from_dlpack(x) - - def test_invalid_byte_swapping(self): - dt = np.dtype('=i8').newbyteorder() - x = np.arange(5, dtype=dt) - - with pytest.raises(BufferError): - np.from_dlpack(x) - - def test_non_contiguous(self): - x = np.arange(25).reshape((5, 5)) - - y1 = x[0] - assert_array_equal(y1, np.from_dlpack(y1)) - - y2 = x[:, 0] - assert_array_equal(y2, np.from_dlpack(y2)) - - y3 = x[1, :] - assert_array_equal(y3, np.from_dlpack(y3)) - - y4 = x[1] - assert_array_equal(y4, np.from_dlpack(y4)) - - y5 = np.diagonal(x).copy() - assert_array_equal(y5, np.from_dlpack(y5)) - - @pytest.mark.parametrize("ndim", range(33)) - def test_higher_dims(self, ndim): - shape = (1,) * ndim - x = np.zeros(shape, dtype=np.float64) - - assert shape == np.from_dlpack(x).shape - - def test_dlpack_device(self): - x = np.arange(5) - assert x.__dlpack_device__() == (1, 0) - y = np.from_dlpack(x) - assert y.__dlpack_device__() == (1, 0) - z = y[::2] - assert z.__dlpack_device__() == (1, 0) - - def dlpack_deleter_exception(self): - x = np.arange(5) - _ = x.__dlpack__() - raise RuntimeError - - def test_dlpack_destructor_exception(self): - with pytest.raises(RuntimeError): - self.dlpack_deleter_exception() - - def test_readonly(self): - x = np.arange(5) - x.flags.writeable = False - with pytest.raises(BufferError): - x.__dlpack__() - - def test_ndim0(self): - x = np.array(1.0) - y = np.from_dlpack(x) - assert_array_equal(x, y) - - def test_size1dims_arrays(self): - x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4), - buffer=np.ones(1000, dtype=np.uint8), order='F') - y = np.from_dlpack(x) - assert_array_equal(x, y) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/doc/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/doc/__init__.py deleted file mode 100644 index 8a944fecd865487e489ecefb90700f5eed38cd44..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/doc/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -import os - -ref_dir = os.path.join(os.path.dirname(__file__)) - -__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and - not f.startswith('__')) - -for f in __all__: - __import__(__name__ + '.' 
+ f) - -del f, ref_dir - -__doc__ = """\ -Topical documentation -===================== - -The following topics are available: -%s - -You can view them by - ->>> help(np.doc.TOPIC) #doctest: +SKIP - -""" % '\n- '.join([''] + __all__) - -__all__.extend(['__doc__']) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimelike.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimelike.py deleted file mode 100644 index 96aab94b24ddd6716a11b684a569f3cdfaf5a5e8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimelike.py +++ /dev/null @@ -1,1335 +0,0 @@ -from __future__ import annotations - -import re -import warnings - -import numpy as np -import pytest - -from pandas._libs import ( - NaT, - OutOfBoundsDatetime, - Timestamp, -) - -import pandas as pd -from pandas import ( - DatetimeIndex, - Period, - PeriodIndex, - TimedeltaIndex, -) -import pandas._testing as tm -from pandas.core.arrays import ( - DatetimeArray, - NumpyExtensionArray, - PeriodArray, - TimedeltaArray, -) -from pandas.core.arrays.datetimes import _sequence_to_dt64ns -from pandas.core.arrays.timedeltas import sequence_to_td64ns - - -# TODO: more freq variants -@pytest.fixture(params=["D", "B", "W", "M", "Q", "Y"]) -def freqstr(request): - """Fixture returning parametrized frequency in string format.""" - return request.param - - -@pytest.fixture -def period_index(freqstr): - """ - A fixture to provide PeriodIndex objects with different frequencies. - - Most PeriodArray behavior is already tested in PeriodIndex tests, - so here we just test that the PeriodArray behavior matches - the PeriodIndex behavior. - """ - # TODO: non-monotone indexes; NaTs, different start dates - with warnings.catch_warnings(): - # suppress deprecation of Period[B] - warnings.filterwarnings( - "ignore", message="Period with BDay freq", category=FutureWarning - ) - pi = pd.period_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr) - return pi - - -@pytest.fixture -def datetime_index(freqstr): - """ - A fixture to provide DatetimeIndex objects with different frequencies. - - Most DatetimeArray behavior is already tested in DatetimeIndex tests, - so here we just test that the DatetimeArray behavior matches - the DatetimeIndex behavior. - """ - # TODO: non-monotone indexes; NaTs, different start dates, timezones - dti = pd.date_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr) - return dti - - -@pytest.fixture -def timedelta_index(): - """ - A fixture to provide TimedeltaIndex objects with different frequencies. - Most TimedeltaArray behavior is already tested in TimedeltaIndex tests, - so here we just test that the TimedeltaArray behavior matches - the TimedeltaIndex behavior. 
- """ - # TODO: flesh this out - return TimedeltaIndex(["1 Day", "3 Hours", "NaT"]) - - -class SharedTests: - index_cls: type[DatetimeIndex | PeriodIndex | TimedeltaIndex] - - @pytest.fixture - def arr1d(self): - """Fixture returning DatetimeArray with daily frequency.""" - data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 - arr = self.array_cls(data, freq="D") - return arr - - def test_compare_len1_raises(self, arr1d): - # make sure we raise when comparing with different lengths, specific - # to the case where one has length-1, which numpy would broadcast - arr = arr1d - idx = self.index_cls(arr) - - with pytest.raises(ValueError, match="Lengths must match"): - arr == arr[:1] - - # test the index classes while we're at it, GH#23078 - with pytest.raises(ValueError, match="Lengths must match"): - idx <= idx[[0]] - - @pytest.mark.parametrize( - "result", - [ - pd.date_range("2020", periods=3), - pd.date_range("2020", periods=3, tz="UTC"), - pd.timedelta_range("0 days", periods=3), - pd.period_range("2020Q1", periods=3, freq="Q"), - ], - ) - def test_compare_with_Categorical(self, result): - expected = pd.Categorical(result) - assert all(result == expected) - assert not any(result != expected) - - @pytest.mark.parametrize("reverse", [True, False]) - @pytest.mark.parametrize("as_index", [True, False]) - def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered): - other = pd.Categorical(arr1d, ordered=ordered) - if as_index: - other = pd.CategoricalIndex(other) - - left, right = arr1d, other - if reverse: - left, right = right, left - - ones = np.ones(arr1d.shape, dtype=bool) - zeros = ~ones - - result = left == right - tm.assert_numpy_array_equal(result, ones) - - result = left != right - tm.assert_numpy_array_equal(result, zeros) - - if not reverse and not as_index: - # Otherwise Categorical raises TypeError bc it is not ordered - # TODO: we should probably get the same behavior regardless? - result = left < right - tm.assert_numpy_array_equal(result, zeros) - - result = left <= right - tm.assert_numpy_array_equal(result, ones) - - result = left > right - tm.assert_numpy_array_equal(result, zeros) - - result = left >= right - tm.assert_numpy_array_equal(result, ones) - - def test_take(self): - data = np.arange(100, dtype="i8") * 24 * 3600 * 10**9 - np.random.default_rng(2).shuffle(data) - - if self.array_cls is PeriodArray: - arr = PeriodArray(data, dtype="period[D]") - else: - arr = self.array_cls(data) - idx = self.index_cls._simple_new(arr) - - takers = [1, 4, 94] - result = arr.take(takers) - expected = idx.take(takers) - - tm.assert_index_equal(self.index_cls(result), expected) - - takers = np.array([1, 4, 94]) - result = arr.take(takers) - expected = idx.take(takers) - - tm.assert_index_equal(self.index_cls(result), expected) - - @pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp(2021, 1, 1, 12).time]) - def test_take_fill_raises(self, fill_value, arr1d): - msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" - with pytest.raises(TypeError, match=msg): - arr1d.take([0, 1], allow_fill=True, fill_value=fill_value) - - def test_take_fill(self, arr1d): - np.arange(10, dtype="i8") * 24 * 3600 * 10**9 - - arr = arr1d # self.array_cls(data, freq="D") - - result = arr.take([-1, 1], allow_fill=True, fill_value=None) - assert result[0] is NaT - - result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan) - assert result[0] is NaT - - result = arr.take([-1, 1], allow_fill=True, fill_value=NaT) - assert result[0] is NaT - - @pytest.mark.filterwarnings( - "ignore:Period with BDay freq is deprecated:FutureWarning" - ) - def test_take_fill_str(self, arr1d): - # Cast str fill_value matching other fill_value-taking methods - result = arr1d.take([-1, 1], allow_fill=True, fill_value=str(arr1d[-1])) - expected = arr1d[[-1, 1]] - tm.assert_equal(result, expected) - - msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got" - with pytest.raises(TypeError, match=msg): - arr1d.take([-1, 1], allow_fill=True, fill_value="foo") - - def test_concat_same_type(self, arr1d): - arr = arr1d - idx = self.index_cls(arr) - idx = idx.insert(0, NaT) - arr = self.array_cls(idx) - - result = arr._concat_same_type([arr[:-1], arr[1:], arr]) - arr2 = arr.astype(object) - expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2]), None) - - tm.assert_index_equal(self.index_cls(result), expected) - - def test_unbox_scalar(self, arr1d): - result = arr1d._unbox_scalar(arr1d[0]) - expected = arr1d._ndarray.dtype.type - assert isinstance(result, expected) - - result = arr1d._unbox_scalar(NaT) - assert isinstance(result, expected) - - msg = f"'value' should be a {self.scalar_type.__name__}." - with pytest.raises(ValueError, match=msg): - arr1d._unbox_scalar("foo") - - def test_check_compatible_with(self, arr1d): - arr1d._check_compatible_with(arr1d[0]) - arr1d._check_compatible_with(arr1d[:1]) - arr1d._check_compatible_with(NaT) - - def test_scalar_from_string(self, arr1d): - result = arr1d._scalar_from_string(str(arr1d[0])) - assert result == arr1d[0] - - def test_reduce_invalid(self, arr1d): - msg = "does not support reduction 'not a method'" - with pytest.raises(TypeError, match=msg): - arr1d._reduce("not a method") - - @pytest.mark.parametrize("method", ["pad", "backfill"]) - def test_fillna_method_doesnt_change_orig(self, method): - data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 - if self.array_cls is PeriodArray: - arr = self.array_cls(data, dtype="period[D]") - else: - arr = self.array_cls(data) - arr[4] = NaT - - fill_value = arr[3] if method == "pad" else arr[5] - - result = arr._pad_or_backfill(method=method) - assert result[4] == fill_value - - # check that the original was not changed - assert arr[4] is NaT - - def test_searchsorted(self): - data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 - if self.array_cls is PeriodArray: - arr = self.array_cls(data, dtype="period[D]") - else: - arr = self.array_cls(data) - - # scalar - result = arr.searchsorted(arr[1]) - assert result == 1 - - result = arr.searchsorted(arr[2], side="right") - assert result == 3 - - # own-type - result = arr.searchsorted(arr[1:3]) - expected = np.array([1, 2], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - - result = arr.searchsorted(arr[1:3], side="right") - expected = np.array([2, 3], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - - # GH#29884 match numpy convention on whether NaT goes - # at the end or the beginning - result = arr.searchsorted(NaT) - assert result == 10 - - 
@pytest.mark.parametrize("box", [None, "index", "series"]) - def test_searchsorted_castable_strings(self, arr1d, box, string_storage): - arr = arr1d - if box is None: - pass - elif box == "index": - # Test the equivalent Index.searchsorted method while we're here - arr = self.index_cls(arr) - else: - # Test the equivalent Series.searchsorted method while we're here - arr = pd.Series(arr) - - # scalar - result = arr.searchsorted(str(arr[1])) - assert result == 1 - - result = arr.searchsorted(str(arr[2]), side="right") - assert result == 3 - - result = arr.searchsorted([str(x) for x in arr[1:3]]) - expected = np.array([1, 2], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - - with pytest.raises( - TypeError, - match=re.escape( - f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " - "or array of those. Got 'str' instead." - ), - ): - arr.searchsorted("foo") - - if string_storage == "python": - arr_type = "StringArray" - elif string_storage == "pyarrow": - arr_type = "ArrowStringArray" - else: - arr_type = "ArrowStringArrayNumpySemantics" - - with pd.option_context("string_storage", string_storage): - with pytest.raises( - TypeError, - match=re.escape( - f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " - f"or array of those. Got '{arr_type}' instead." - ), - ): - arr.searchsorted([str(arr[1]), "baz"]) - - def test_getitem_near_implementation_bounds(self): - # We only check tz-naive for DTA bc the bounds are slightly different - # for other tzs - i8vals = np.asarray([NaT._value + n for n in range(1, 5)], dtype="i8") - if self.array_cls is PeriodArray: - arr = self.array_cls(i8vals, dtype="period[ns]") - else: - arr = self.array_cls(i8vals, freq="ns") - arr[0] # should not raise OutOfBoundsDatetime - - index = pd.Index(arr) - index[0] # should not raise OutOfBoundsDatetime - - ser = pd.Series(arr) - ser[0] # should not raise OutOfBoundsDatetime - - def test_getitem_2d(self, arr1d): - # 2d slicing on a 1D array - expected = type(arr1d)(arr1d._ndarray[:, np.newaxis], dtype=arr1d.dtype) - result = arr1d[:, np.newaxis] - tm.assert_equal(result, expected) - - # Lookup on a 2D array - arr2d = expected - expected = type(arr2d)(arr2d._ndarray[:3, 0], dtype=arr2d.dtype) - result = arr2d[:3, 0] - tm.assert_equal(result, expected) - - # Scalar lookup - result = arr2d[-1, 0] - expected = arr1d[-1] - assert result == expected - - def test_iter_2d(self, arr1d): - data2d = arr1d._ndarray[:3, np.newaxis] - arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype) - result = list(arr2d) - assert len(result) == 3 - for x in result: - assert isinstance(x, type(arr1d)) - assert x.ndim == 1 - assert x.dtype == arr1d.dtype - - def test_repr_2d(self, arr1d): - data2d = arr1d._ndarray[:3, np.newaxis] - arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype) - - result = repr(arr2d) - - if isinstance(arr2d, TimedeltaArray): - expected = ( - f"<{type(arr2d).__name__}>\n" - "[\n" - f"['{arr1d[0]._repr_base()}'],\n" - f"['{arr1d[1]._repr_base()}'],\n" - f"['{arr1d[2]._repr_base()}']\n" - "]\n" - f"Shape: (3, 1), dtype: {arr1d.dtype}" - ) - else: - expected = ( - f"<{type(arr2d).__name__}>\n" - "[\n" - f"['{arr1d[0]}'],\n" - f"['{arr1d[1]}'],\n" - f"['{arr1d[2]}']\n" - "]\n" - f"Shape: (3, 1), dtype: {arr1d.dtype}" - ) - - assert result == expected - - def test_setitem(self): - data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 - if self.array_cls is PeriodArray: - arr = self.array_cls(data, dtype="period[D]") - else: - arr = self.array_cls(data, freq="D") - - arr[0] = arr[1] - 
expected = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 - expected[0] = expected[1] - - tm.assert_numpy_array_equal(arr.asi8, expected) - - arr[:2] = arr[-2:] - expected[:2] = expected[-2:] - tm.assert_numpy_array_equal(arr.asi8, expected) - - @pytest.mark.parametrize( - "box", - [ - pd.Index, - pd.Series, - np.array, - list, - NumpyExtensionArray, - ], - ) - def test_setitem_object_dtype(self, box, arr1d): - expected = arr1d.copy()[::-1] - if expected.dtype.kind in ["m", "M"]: - expected = expected._with_freq(None) - - vals = expected - if box is list: - vals = list(vals) - elif box is np.array: - # if we do np.array(x).astype(object) then dt64 and td64 cast to ints - vals = np.array(vals.astype(object)) - elif box is NumpyExtensionArray: - vals = box(np.asarray(vals, dtype=object)) - else: - vals = box(vals).astype(object) - - arr1d[:] = vals - - tm.assert_equal(arr1d, expected) - - def test_setitem_strs(self, arr1d): - # Check that we parse strs in both scalar and listlike - - # Setting list-like of strs - expected = arr1d.copy() - expected[[0, 1]] = arr1d[-2:] - - result = arr1d.copy() - result[:2] = [str(x) for x in arr1d[-2:]] - tm.assert_equal(result, expected) - - # Same thing but now for just a scalar str - expected = arr1d.copy() - expected[0] = arr1d[-1] - - result = arr1d.copy() - result[0] = str(arr1d[-1]) - tm.assert_equal(result, expected) - - @pytest.mark.parametrize("as_index", [True, False]) - def test_setitem_categorical(self, arr1d, as_index): - expected = arr1d.copy()[::-1] - if not isinstance(expected, PeriodArray): - expected = expected._with_freq(None) - - cat = pd.Categorical(arr1d) - if as_index: - cat = pd.CategoricalIndex(cat) - - arr1d[:] = cat[::-1] - - tm.assert_equal(arr1d, expected) - - def test_setitem_raises(self, arr1d): - arr = arr1d[:10] - val = arr[0] - - with pytest.raises(IndexError, match="index 12 is out of bounds"): - arr[12] = val - - with pytest.raises(TypeError, match="value should be a.* 'object'"): - arr[0] = object() - - msg = "cannot set using a list-like indexer with a different length" - with pytest.raises(ValueError, match=msg): - # GH#36339 - arr[[]] = [arr[1]] - - msg = "cannot set using a slice indexer with a different length than" - with pytest.raises(ValueError, match=msg): - # GH#36339 - arr[1:1] = arr[:3] - - @pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series]) - def test_setitem_numeric_raises(self, arr1d, box): - # We dont case e.g. int64 to our own dtype for setitem - - msg = ( - f"value should be a '{arr1d._scalar_type.__name__}', " - "'NaT', or array of those. 
Got" - ) - with pytest.raises(TypeError, match=msg): - arr1d[:2] = box([0, 1]) - - with pytest.raises(TypeError, match=msg): - arr1d[:2] = box([0.0, 1.0]) - - def test_inplace_arithmetic(self): - # GH#24115 check that iadd and isub are actually in-place - data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 - if self.array_cls is PeriodArray: - arr = self.array_cls(data, dtype="period[D]") - else: - arr = self.array_cls(data, freq="D") - - expected = arr + pd.Timedelta(days=1) - arr += pd.Timedelta(days=1) - tm.assert_equal(arr, expected) - - expected = arr - pd.Timedelta(days=1) - arr -= pd.Timedelta(days=1) - tm.assert_equal(arr, expected) - - def test_shift_fill_int_deprecated(self, arr1d): - # GH#31971, enforced in 2.0 - with pytest.raises(TypeError, match="value should be a"): - arr1d.shift(1, fill_value=1) - - def test_median(self, arr1d): - arr = arr1d - if len(arr) % 2 == 0: - # make it easier to define `expected` - arr = arr[:-1] - - expected = arr[len(arr) // 2] - - result = arr.median() - assert type(result) is type(expected) - assert result == expected - - arr[len(arr) // 2] = NaT - if not isinstance(expected, Period): - expected = arr[len(arr) // 2 - 1 : len(arr) // 2 + 2].mean() - - assert arr.median(skipna=False) is NaT - - result = arr.median() - assert type(result) is type(expected) - assert result == expected - - assert arr[:0].median() is NaT - assert arr[:0].median(skipna=False) is NaT - - # 2d Case - arr2 = arr.reshape(-1, 1) - - result = arr2.median(axis=None) - assert type(result) is type(expected) - assert result == expected - - assert arr2.median(axis=None, skipna=False) is NaT - - result = arr2.median(axis=0) - expected2 = type(arr)._from_sequence([expected], dtype=arr.dtype) - tm.assert_equal(result, expected2) - - result = arr2.median(axis=0, skipna=False) - expected2 = type(arr)._from_sequence([NaT], dtype=arr.dtype) - tm.assert_equal(result, expected2) - - result = arr2.median(axis=1) - tm.assert_equal(result, arr) - - result = arr2.median(axis=1, skipna=False) - tm.assert_equal(result, arr) - - def test_from_integer_array(self): - arr = np.array([1, 2, 3], dtype=np.int64) - expected = self.array_cls(arr, dtype=self.example_dtype) - - data = pd.array(arr, dtype="Int64") - result = self.array_cls(data, dtype=self.example_dtype) - - tm.assert_extension_array_equal(result, expected) - - -class TestDatetimeArray(SharedTests): - index_cls = DatetimeIndex - array_cls = DatetimeArray - scalar_type = Timestamp - example_dtype = "M8[ns]" - - @pytest.fixture - def arr1d(self, tz_naive_fixture, freqstr): - """ - Fixture returning DatetimeArray with parametrized frequency and - timezones - """ - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01 01:01:00", periods=5, freq=freqstr, tz=tz) - dta = dti._data - return dta - - def test_round(self, arr1d): - # GH#24064 - dti = self.index_cls(arr1d) - - result = dti.round(freq="2T") - expected = dti - pd.Timedelta(minutes=1) - expected = expected._with_freq(None) - tm.assert_index_equal(result, expected) - - dta = dti._data - result = dta.round(freq="2T") - expected = expected._data._with_freq(None) - tm.assert_datetime_array_equal(result, expected) - - def test_array_interface(self, datetime_index): - arr = DatetimeArray(datetime_index) - - # default asarray gives the same underlying data (for tz naive) - result = np.asarray(arr) - expected = arr._ndarray - assert result is expected - tm.assert_numpy_array_equal(result, expected) - result = np.array(arr, copy=False) - assert result is expected - 
tm.assert_numpy_array_equal(result, expected) - - # specifying M8[ns] gives the same result as default - result = np.asarray(arr, dtype="datetime64[ns]") - expected = arr._ndarray - assert result is expected - tm.assert_numpy_array_equal(result, expected) - result = np.array(arr, dtype="datetime64[ns]", copy=False) - assert result is expected - tm.assert_numpy_array_equal(result, expected) - result = np.array(arr, dtype="datetime64[ns]") - assert result is not expected - tm.assert_numpy_array_equal(result, expected) - - # to object dtype - result = np.asarray(arr, dtype=object) - expected = np.array(list(arr), dtype=object) - tm.assert_numpy_array_equal(result, expected) - - # to other dtype always copies - result = np.asarray(arr, dtype="int64") - assert result is not arr.asi8 - assert not np.may_share_memory(arr, result) - expected = arr.asi8.copy() - tm.assert_numpy_array_equal(result, expected) - - # other dtypes handled by numpy - for dtype in ["float64", str]: - result = np.asarray(arr, dtype=dtype) - expected = np.asarray(arr).astype(dtype) - tm.assert_numpy_array_equal(result, expected) - - def test_array_object_dtype(self, arr1d): - # GH#23524 - arr = arr1d - dti = self.index_cls(arr1d) - - expected = np.array(list(dti)) - - result = np.array(arr, dtype=object) - tm.assert_numpy_array_equal(result, expected) - - # also test the DatetimeIndex method while we're at it - result = np.array(dti, dtype=object) - tm.assert_numpy_array_equal(result, expected) - - def test_array_tz(self, arr1d): - # GH#23524 - arr = arr1d - dti = self.index_cls(arr1d) - - expected = dti.asi8.view("M8[ns]") - result = np.array(arr, dtype="M8[ns]") - tm.assert_numpy_array_equal(result, expected) - - result = np.array(arr, dtype="datetime64[ns]") - tm.assert_numpy_array_equal(result, expected) - - # check that we are not making copies when setting copy=False - result = np.array(arr, dtype="M8[ns]", copy=False) - assert result.base is expected.base - assert result.base is not None - result = np.array(arr, dtype="datetime64[ns]", copy=False) - assert result.base is expected.base - assert result.base is not None - - def test_array_i8_dtype(self, arr1d): - arr = arr1d - dti = self.index_cls(arr1d) - - expected = dti.asi8 - result = np.array(arr, dtype="i8") - tm.assert_numpy_array_equal(result, expected) - - result = np.array(arr, dtype=np.int64) - tm.assert_numpy_array_equal(result, expected) - - # check that we are still making copies when setting copy=False - result = np.array(arr, dtype="i8", copy=False) - assert result.base is not expected.base - assert result.base is None - - def test_from_array_keeps_base(self): - # Ensure that DatetimeArray._ndarray.base isn't lost. 
- arr = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]") - dta = DatetimeArray(arr) - - assert dta._ndarray is arr - dta = DatetimeArray(arr[:0]) - assert dta._ndarray.base is arr - - def test_from_dti(self, arr1d): - arr = arr1d - dti = self.index_cls(arr1d) - assert list(dti) == list(arr) - - # Check that Index.__new__ knows what to do with DatetimeArray - dti2 = pd.Index(arr) - assert isinstance(dti2, DatetimeIndex) - assert list(dti2) == list(arr) - - def test_astype_object(self, arr1d): - arr = arr1d - dti = self.index_cls(arr1d) - - asobj = arr.astype("O") - assert isinstance(asobj, np.ndarray) - assert asobj.dtype == "O" - assert list(asobj) == list(dti) - - @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") - def test_to_period(self, datetime_index, freqstr): - dti = datetime_index - arr = DatetimeArray(dti) - - expected = dti.to_period(freq=freqstr) - result = arr.to_period(freq=freqstr) - assert isinstance(result, PeriodArray) - - tm.assert_equal(result, expected._data) - - def test_to_period_2d(self, arr1d): - arr2d = arr1d.reshape(1, -1) - - warn = None if arr1d.tz is None else UserWarning - with tm.assert_produces_warning(warn): - result = arr2d.to_period("D") - expected = arr1d.to_period("D").reshape(1, -1) - tm.assert_period_array_equal(result, expected) - - @pytest.mark.parametrize("propname", DatetimeArray._bool_ops) - def test_bool_properties(self, arr1d, propname): - # in this case _bool_ops is just `is_leap_year` - dti = self.index_cls(arr1d) - arr = arr1d - assert dti.freq == arr.freq - - result = getattr(arr, propname) - expected = np.array(getattr(dti, propname), dtype=result.dtype) - - tm.assert_numpy_array_equal(result, expected) - - @pytest.mark.parametrize("propname", DatetimeArray._field_ops) - def test_int_properties(self, arr1d, propname): - dti = self.index_cls(arr1d) - arr = arr1d - - result = getattr(arr, propname) - expected = np.array(getattr(dti, propname), dtype=result.dtype) - - tm.assert_numpy_array_equal(result, expected) - - def test_take_fill_valid(self, arr1d, fixed_now_ts): - arr = arr1d - dti = self.index_cls(arr1d) - - now = fixed_now_ts.tz_localize(dti.tz) - result = arr.take([-1, 1], allow_fill=True, fill_value=now) - assert result[0] == now - - msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got" - with pytest.raises(TypeError, match=msg): - # fill_value Timedelta invalid - arr.take([-1, 1], allow_fill=True, fill_value=now - now) - - with pytest.raises(TypeError, match=msg): - # fill_value Period invalid - arr.take([-1, 1], allow_fill=True, fill_value=Period("2014Q1")) - - tz = None if dti.tz is not None else "US/Eastern" - now = fixed_now_ts.tz_localize(tz) - msg = "Cannot compare tz-naive and tz-aware datetime-like objects" - with pytest.raises(TypeError, match=msg): - # Timestamp with mismatched tz-awareness - arr.take([-1, 1], allow_fill=True, fill_value=now) - - value = NaT._value - msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" - with pytest.raises(TypeError, match=msg): - # require NaT, not iNaT, as it could be confused with an integer - arr.take([-1, 1], allow_fill=True, fill_value=value) - - value = np.timedelta64("NaT", "ns") - with pytest.raises(TypeError, match=msg): - # require appropriate-dtype if we have a NA value - arr.take([-1, 1], allow_fill=True, fill_value=value) - - if arr.tz is not None: - # GH#37356 - # Assuming here that arr1d fixture does not include Australia/Melbourne - value = fixed_now_ts.tz_localize("Australia/Melbourne") - result = arr.take([-1, 1], allow_fill=True, fill_value=value) - - expected = arr.take( - [-1, 1], - allow_fill=True, - fill_value=value.tz_convert(arr.dtype.tz), - ) - tm.assert_equal(result, expected) - - def test_concat_same_type_invalid(self, arr1d): - # different timezones - arr = arr1d - - if arr.tz is None: - other = arr.tz_localize("UTC") - else: - other = arr.tz_localize(None) - - with pytest.raises(ValueError, match="to_concat must have the same"): - arr._concat_same_type([arr, other]) - - def test_concat_same_type_different_freq(self): - # we *can* concatenate DTI with different freqs. - a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central")) - b = DatetimeArray(pd.date_range("2000", periods=2, freq="H", tz="US/Central")) - result = DatetimeArray._concat_same_type([a, b]) - expected = DatetimeArray( - pd.to_datetime( - [ - "2000-01-01 00:00:00", - "2000-01-02 00:00:00", - "2000-01-01 00:00:00", - "2000-01-01 01:00:00", - ] - ).tz_localize("US/Central") - ) - - tm.assert_datetime_array_equal(result, expected) - - def test_strftime(self, arr1d): - arr = arr1d - - result = arr.strftime("%Y %b") - expected = np.array([ts.strftime("%Y %b") for ts in arr], dtype=object) - tm.assert_numpy_array_equal(result, expected) - - def test_strftime_nat(self): - # GH 29578 - arr = DatetimeArray(DatetimeIndex(["2019-01-01", NaT])) - - result = arr.strftime("%Y-%m-%d") - expected = np.array(["2019-01-01", np.nan], dtype=object) - tm.assert_numpy_array_equal(result, expected) - - -class TestTimedeltaArray(SharedTests): - index_cls = TimedeltaIndex - array_cls = TimedeltaArray - scalar_type = pd.Timedelta - example_dtype = "m8[ns]" - - def test_from_tdi(self): - tdi = TimedeltaIndex(["1 Day", "3 Hours"]) - arr = TimedeltaArray(tdi) - assert list(arr) == list(tdi) - - # Check that Index.__new__ knows what to do with TimedeltaArray - tdi2 = pd.Index(arr) - assert isinstance(tdi2, TimedeltaIndex) - assert list(tdi2) == list(arr) - - def test_astype_object(self): - tdi = TimedeltaIndex(["1 Day", "3 Hours"]) - arr = TimedeltaArray(tdi) - asobj = arr.astype("O") - assert isinstance(asobj, np.ndarray) - assert asobj.dtype == "O" - assert list(asobj) == list(tdi) - - def test_to_pytimedelta(self, timedelta_index): - tdi = timedelta_index - arr = TimedeltaArray(tdi) - - expected = tdi.to_pytimedelta() - result = arr.to_pytimedelta() - - tm.assert_numpy_array_equal(result, expected) - - def test_total_seconds(self, timedelta_index): - tdi = timedelta_index - arr = TimedeltaArray(tdi) - - expected = tdi.total_seconds() - result = arr.total_seconds() - - tm.assert_numpy_array_equal(result, expected.values) - - @pytest.mark.parametrize("propname", TimedeltaArray._field_ops) - def test_int_properties(self, timedelta_index, propname): - tdi = timedelta_index - arr = TimedeltaArray(tdi) - - result = getattr(arr, propname) - expected = np.array(getattr(tdi, propname), dtype=result.dtype) - - tm.assert_numpy_array_equal(result, expected) - - def 
test_array_interface(self, timedelta_index): - arr = TimedeltaArray(timedelta_index) - - # default asarray gives the same underlying data - result = np.asarray(arr) - expected = arr._ndarray - assert result is expected - tm.assert_numpy_array_equal(result, expected) - result = np.array(arr, copy=False) - assert result is expected - tm.assert_numpy_array_equal(result, expected) - - # specifying m8[ns] gives the same result as default - result = np.asarray(arr, dtype="timedelta64[ns]") - expected = arr._ndarray - assert result is expected - tm.assert_numpy_array_equal(result, expected) - result = np.array(arr, dtype="timedelta64[ns]", copy=False) - assert result is expected - tm.assert_numpy_array_equal(result, expected) - result = np.array(arr, dtype="timedelta64[ns]") - assert result is not expected - tm.assert_numpy_array_equal(result, expected) - - # to object dtype - result = np.asarray(arr, dtype=object) - expected = np.array(list(arr), dtype=object) - tm.assert_numpy_array_equal(result, expected) - - # to other dtype always copies - result = np.asarray(arr, dtype="int64") - assert result is not arr.asi8 - assert not np.may_share_memory(arr, result) - expected = arr.asi8.copy() - tm.assert_numpy_array_equal(result, expected) - - # other dtypes handled by numpy - for dtype in ["float64", str]: - result = np.asarray(arr, dtype=dtype) - expected = np.asarray(arr).astype(dtype) - tm.assert_numpy_array_equal(result, expected) - - def test_take_fill_valid(self, timedelta_index, fixed_now_ts): - tdi = timedelta_index - arr = TimedeltaArray(tdi) - - td1 = pd.Timedelta(days=1) - result = arr.take([-1, 1], allow_fill=True, fill_value=td1) - assert result[0] == td1 - - value = fixed_now_ts - msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got" - with pytest.raises(TypeError, match=msg): - # fill_value Timestamp invalid - arr.take([0, 1], allow_fill=True, fill_value=value) - - value = fixed_now_ts.to_period("D") - with pytest.raises(TypeError, match=msg): - # fill_value Period invalid - arr.take([0, 1], allow_fill=True, fill_value=value) - - value = np.datetime64("NaT", "ns") - with pytest.raises(TypeError, match=msg): - # require appropriate-dtype if we have a NA value - arr.take([-1, 1], allow_fill=True, fill_value=value) - - -@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning") -@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") -class TestPeriodArray(SharedTests): - index_cls = PeriodIndex - array_cls = PeriodArray - scalar_type = Period - example_dtype = PeriodIndex([], freq="W").dtype - - @pytest.fixture - def arr1d(self, period_index): - """ - Fixture returning DatetimeArray from parametrized PeriodIndex objects - """ - return period_index._data - - def test_from_pi(self, arr1d): - pi = self.index_cls(arr1d) - arr = arr1d - assert list(arr) == list(pi) - - # Check that Index.__new__ knows what to do with PeriodArray - pi2 = pd.Index(arr) - assert isinstance(pi2, PeriodIndex) - assert list(pi2) == list(arr) - - def test_astype_object(self, arr1d): - pi = self.index_cls(arr1d) - arr = arr1d - asobj = arr.astype("O") - assert isinstance(asobj, np.ndarray) - assert asobj.dtype == "O" - assert list(asobj) == list(pi) - - def test_take_fill_valid(self, arr1d): - arr = arr1d - - value = NaT._value - msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" - with pytest.raises(TypeError, match=msg): - # require NaT, not iNaT, as it could be confused with an integer - arr.take([-1, 1], allow_fill=True, fill_value=value) - - value = np.timedelta64("NaT", "ns") - with pytest.raises(TypeError, match=msg): - # require appropriate-dtype if we have a NA value - arr.take([-1, 1], allow_fill=True, fill_value=value) - - @pytest.mark.parametrize("how", ["S", "E"]) - def test_to_timestamp(self, how, arr1d): - pi = self.index_cls(arr1d) - arr = arr1d - - expected = DatetimeArray(pi.to_timestamp(how=how)) - result = arr.to_timestamp(how=how) - assert isinstance(result, DatetimeArray) - - tm.assert_equal(result, expected) - - def test_to_timestamp_roundtrip_bday(self): - # Case where infer_freq inside would choose "D" instead of "B" - dta = pd.date_range("2021-10-18", periods=3, freq="B")._data - parr = dta.to_period() - result = parr.to_timestamp() - assert result.freq == "B" - tm.assert_extension_array_equal(result, dta) - - dta2 = dta[::2] - parr2 = dta2.to_period() - result2 = parr2.to_timestamp() - assert result2.freq == "2B" - tm.assert_extension_array_equal(result2, dta2) - - parr3 = dta.to_period("2B") - result3 = parr3.to_timestamp() - assert result3.freq == "B" - tm.assert_extension_array_equal(result3, dta) - - def test_to_timestamp_out_of_bounds(self): - # GH#19643 previously overflowed silently - pi = pd.period_range("1500", freq="Y", periods=3) - msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00" - with pytest.raises(OutOfBoundsDatetime, match=msg): - pi.to_timestamp() - - with pytest.raises(OutOfBoundsDatetime, match=msg): - pi._data.to_timestamp() - - @pytest.mark.parametrize("propname", PeriodArray._bool_ops) - def test_bool_properties(self, arr1d, propname): - # in this case _bool_ops is just `is_leap_year` - pi = self.index_cls(arr1d) - arr = arr1d - - result = getattr(arr, propname) - expected = np.array(getattr(pi, propname)) - - tm.assert_numpy_array_equal(result, expected) - - @pytest.mark.parametrize("propname", PeriodArray._field_ops) - def test_int_properties(self, arr1d, propname): - pi = self.index_cls(arr1d) - arr = arr1d - - result = getattr(arr, propname) - expected = np.array(getattr(pi, propname)) - - tm.assert_numpy_array_equal(result, expected) - - def test_array_interface(self, arr1d): - arr = arr1d - - # default asarray gives objects - result = np.asarray(arr) - expected = np.array(list(arr), dtype=object) - tm.assert_numpy_array_equal(result, expected) - - # to object dtype (same as default) - result = np.asarray(arr, dtype=object) - tm.assert_numpy_array_equal(result, expected) - - result = np.asarray(arr, dtype="int64") - tm.assert_numpy_array_equal(result, arr.asi8) - - # to other dtypes - msg = r"float\(\) argument must be a string or a( real)? 
number, not 'Period'" - with pytest.raises(TypeError, match=msg): - np.asarray(arr, dtype="float64") - - result = np.asarray(arr, dtype="S20") - expected = np.asarray(arr).astype("S20") - tm.assert_numpy_array_equal(result, expected) - - def test_strftime(self, arr1d): - arr = arr1d - - result = arr.strftime("%Y") - expected = np.array([per.strftime("%Y") for per in arr], dtype=object) - tm.assert_numpy_array_equal(result, expected) - - def test_strftime_nat(self): - # GH 29578 - arr = PeriodArray(PeriodIndex(["2019-01-01", NaT], dtype="period[D]")) - - result = arr.strftime("%Y-%m-%d") - expected = np.array(["2019-01-01", np.nan], dtype=object) - tm.assert_numpy_array_equal(result, expected) - - -@pytest.mark.parametrize( - "arr,casting_nats", - [ - ( - TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, - (NaT, np.timedelta64("NaT", "ns")), - ), - ( - pd.date_range("2000-01-01", periods=3, freq="D")._data, - (NaT, np.datetime64("NaT", "ns")), - ), - (pd.period_range("2000-01-01", periods=3, freq="D")._data, (NaT,)), - ], - ids=lambda x: type(x).__name__, -) -def test_casting_nat_setitem_array(arr, casting_nats): - expected = type(arr)._from_sequence([NaT, arr[1], arr[2]]) - - for nat in casting_nats: - arr = arr.copy() - arr[0] = nat - tm.assert_equal(arr, expected) - - -@pytest.mark.parametrize( - "arr,non_casting_nats", - [ - ( - TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, - (np.datetime64("NaT", "ns"), NaT._value), - ), - ( - pd.date_range("2000-01-01", periods=3, freq="D")._data, - (np.timedelta64("NaT", "ns"), NaT._value), - ), - ( - pd.period_range("2000-01-01", periods=3, freq="D")._data, - (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), NaT._value), - ), - ], - ids=lambda x: type(x).__name__, -) -def test_invalid_nat_setitem_array(arr, non_casting_nats): - msg = ( - "value should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. " - "Got '(timedelta64|datetime64|int)' instead." 
- ) - - for nat in non_casting_nats: - with pytest.raises(TypeError, match=msg): - arr[0] = nat - - -@pytest.mark.parametrize( - "arr", - [ - pd.date_range("2000", periods=4).array, - pd.timedelta_range("2000", periods=4).array, - ], -) -def test_to_numpy_extra(arr): - arr[0] = NaT - original = arr.copy() - - result = arr.to_numpy() - assert np.isnan(result[0]) - - result = arr.to_numpy(dtype="int64") - assert result[0] == -9223372036854775808 - - result = arr.to_numpy(dtype="int64", na_value=0) - assert result[0] == 0 - - result = arr.to_numpy(na_value=arr[1].to_numpy()) - assert result[0] == result[1] - - result = arr.to_numpy(na_value=arr[1].to_numpy(copy=False)) - assert result[0] == result[1] - - tm.assert_equal(arr, original) - - -@pytest.mark.parametrize("as_index", [True, False]) -@pytest.mark.parametrize( - "values", - [ - pd.to_datetime(["2020-01-01", "2020-02-01"]), - TimedeltaIndex([1, 2], unit="D"), - PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"), - ], -) -@pytest.mark.parametrize( - "klass", - [ - list, - np.array, - pd.array, - pd.Series, - pd.Index, - pd.Categorical, - pd.CategoricalIndex, - ], -) -def test_searchsorted_datetimelike_with_listlike(values, klass, as_index): - # https://github.com/pandas-dev/pandas/issues/32762 - if not as_index: - values = values._data - - result = values.searchsorted(klass(values)) - expected = np.array([0, 1], dtype=result.dtype) - - tm.assert_numpy_array_equal(result, expected) - - -@pytest.mark.parametrize( - "values", - [ - pd.to_datetime(["2020-01-01", "2020-02-01"]), - TimedeltaIndex([1, 2], unit="D"), - PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"), - ], -) -@pytest.mark.parametrize( - "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] -) -def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg): - # https://github.com/pandas-dev/pandas/issues/32762 - msg = "[Unexpected type|Cannot compare]" - with pytest.raises(TypeError, match=msg): - values.searchsorted(arg) - - -@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series]) -def test_period_index_construction_from_strings(klass): - # https://github.com/pandas-dev/pandas/issues/26109 - strings = ["2020Q1", "2020Q2"] * 2 - data = klass(strings) - result = PeriodIndex(data, freq="Q") - expected = PeriodIndex([Period(s) for s in strings]) - tm.assert_index_equal(result, expected) - - -@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) -def test_from_pandas_array(dtype): - # GH#24615 - data = np.array([1, 2, 3], dtype=dtype) - arr = NumpyExtensionArray(data) - - cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype] - - result = cls(arr) - expected = cls(data) - tm.assert_extension_array_equal(result, expected) - - result = cls._from_sequence(arr) - expected = cls._from_sequence(data) - tm.assert_extension_array_equal(result, expected) - - func = {"M8[ns]": _sequence_to_dt64ns, "m8[ns]": sequence_to_td64ns}[dtype] - result = func(arr)[0] - expected = func(data)[0] - tm.assert_equal(result, expected) - - func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype] - result = func(arr).array - expected = func(data).array - tm.assert_equal(result, expected) - - # Let's check the Indexes while we're here - idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype] - result = idx_cls(arr) - expected = idx_cls(data) - tm.assert_index_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_date_range.py 
b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_date_range.py deleted file mode 100644 index 2e2e33e2fb3666f3ed00a33978c46fb3d5b3c3f6..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_date_range.py +++ /dev/null @@ -1,1304 +0,0 @@ -""" -test date_range, bdate_range construction from the convenience range functions -""" - -from datetime import ( - datetime, - time, - timedelta, -) - -import numpy as np -import pytest -import pytz -from pytz import timezone - -from pandas._libs.tslibs import timezones -from pandas._libs.tslibs.offsets import ( - BDay, - CDay, - DateOffset, - MonthEnd, - prefix_mapping, -) -from pandas.errors import OutOfBoundsDatetime -import pandas.util._test_decorators as td - -import pandas as pd -from pandas import ( - DataFrame, - DatetimeIndex, - Series, - Timedelta, - Timestamp, - bdate_range, - date_range, - offsets, -) -import pandas._testing as tm -from pandas.core.arrays.datetimes import _generate_range as generate_range - -START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) - - -def _get_expected_range( - begin_to_match, - end_to_match, - both_range, - inclusive_endpoints, -): - """Helper to get expected range from a both inclusive range""" - left_match = begin_to_match == both_range[0] - right_match = end_to_match == both_range[-1] - - if inclusive_endpoints == "left" and right_match: - expected_range = both_range[:-1] - elif inclusive_endpoints == "right" and left_match: - expected_range = both_range[1:] - elif inclusive_endpoints == "neither" and left_match and right_match: - expected_range = both_range[1:-1] - elif inclusive_endpoints == "neither" and right_match: - expected_range = both_range[:-1] - elif inclusive_endpoints == "neither" and left_match: - expected_range = both_range[1:] - elif inclusive_endpoints == "both": - expected_range = both_range[:] - else: - expected_range = both_range[:] - - return expected_range - - -class TestTimestampEquivDateRange: - # Older tests in TestTimeSeries constructed their `stamp` objects - # using `date_range` instead of the `Timestamp` constructor. - # TestTimestampEquivDateRange checks that these are equivalent in the - # pertinent cases. 
- - def test_date_range_timestamp_equiv(self): - rng = date_range("20090415", "20090519", tz="US/Eastern") - stamp = rng[0] - - ts = Timestamp("20090415", tz="US/Eastern") - assert ts == stamp - - def test_date_range_timestamp_equiv_dateutil(self): - rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern") - stamp = rng[0] - - ts = Timestamp("20090415", tz="dateutil/US/Eastern") - assert ts == stamp - - def test_date_range_timestamp_equiv_explicit_pytz(self): - rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern")) - stamp = rng[0] - - ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern")) - assert ts == stamp - - @td.skip_if_windows - def test_date_range_timestamp_equiv_explicit_dateutil(self): - from pandas._libs.tslibs.timezones import dateutil_gettz as gettz - - rng = date_range("20090415", "20090519", tz=gettz("US/Eastern")) - stamp = rng[0] - - ts = Timestamp("20090415", tz=gettz("US/Eastern")) - assert ts == stamp - - def test_date_range_timestamp_equiv_from_datetime_instance(self): - datetime_instance = datetime(2014, 3, 4) - # build a timestamp with a frequency, since then it supports - # addition/subtraction of integers - timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0] - - ts = Timestamp(datetime_instance) - assert ts == timestamp_instance - - def test_date_range_timestamp_equiv_preserve_frequency(self): - timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0] - ts = Timestamp("2014-03-05") - - assert timestamp_instance == ts - - -class TestDateRanges: - @pytest.mark.parametrize("freq", ["N", "U", "L", "T", "S", "H", "D"]) - def test_date_range_edges(self, freq): - # GH#13672 - td = Timedelta(f"1{freq}") - ts = Timestamp("1970-01-01") - - idx = date_range( - start=ts + td, - end=ts + 4 * td, - freq=freq, - ) - exp = DatetimeIndex( - [ts + n * td for n in range(1, 5)], - freq=freq, - ) - tm.assert_index_equal(idx, exp) - - # start after end - idx = date_range( - start=ts + 4 * td, - end=ts + td, - freq=freq, - ) - exp = DatetimeIndex([], freq=freq) - tm.assert_index_equal(idx, exp) - - # start matches end - idx = date_range( - start=ts + td, - end=ts + td, - freq=freq, - ) - exp = DatetimeIndex([ts + td], freq=freq) - tm.assert_index_equal(idx, exp) - - def test_date_range_near_implementation_bound(self): - # GH#??? 
- freq = Timedelta(1) - - with pytest.raises(OutOfBoundsDatetime, match="Cannot generate range with"): - date_range(end=Timestamp.min, periods=2, freq=freq) - - def test_date_range_nat(self): - # GH#11587 - msg = "Neither `start` nor `end` can be NaT" - with pytest.raises(ValueError, match=msg): - date_range(start="2016-01-01", end=pd.NaT, freq="D") - with pytest.raises(ValueError, match=msg): - date_range(start=pd.NaT, end="2016-01-01", freq="D") - - def test_date_range_multiplication_overflow(self): - # GH#24255 - # check that overflows in calculating `addend = periods * stride` - # are caught - with tm.assert_produces_warning(None): - # we should _not_ be seeing a overflow RuntimeWarning - dti = date_range(start="1677-09-22", periods=213503, freq="D") - - assert dti[0] == Timestamp("1677-09-22") - assert len(dti) == 213503 - - msg = "Cannot generate range with" - with pytest.raises(OutOfBoundsDatetime, match=msg): - date_range("1969-05-04", periods=200000000, freq="30000D") - - def test_date_range_unsigned_overflow_handling(self): - # GH#24255 - # case where `addend = periods * stride` overflows int64 bounds - # but not uint64 bounds - dti = date_range(start="1677-09-22", end="2262-04-11", freq="D") - - dti2 = date_range(start=dti[0], periods=len(dti), freq="D") - assert dti2.equals(dti) - - dti3 = date_range(end=dti[-1], periods=len(dti), freq="D") - assert dti3.equals(dti) - - def test_date_range_int64_overflow_non_recoverable(self): - # GH#24255 - # case with start later than 1970-01-01, overflow int64 but not uint64 - msg = "Cannot generate range with" - with pytest.raises(OutOfBoundsDatetime, match=msg): - date_range(start="1970-02-01", periods=106752 * 24, freq="H") - - # case with end before 1970-01-01, overflow int64 but not uint64 - with pytest.raises(OutOfBoundsDatetime, match=msg): - date_range(end="1969-11-14", periods=106752 * 24, freq="H") - - @pytest.mark.slow - @pytest.mark.parametrize( - "s_ts, e_ts", [("2262-02-23", "1969-11-14"), ("1970-02-01", "1677-10-22")] - ) - def test_date_range_int64_overflow_stride_endpoint_different_signs( - self, s_ts, e_ts - ): - # cases where stride * periods overflow int64 and stride/endpoint - # have different signs - start = Timestamp(s_ts) - end = Timestamp(e_ts) - - expected = date_range(start=start, end=end, freq="-1H") - assert expected[0] == start - assert expected[-1] == end - - dti = date_range(end=end, periods=len(expected), freq="-1H") - tm.assert_index_equal(dti, expected) - - def test_date_range_out_of_bounds(self): - # GH#14187 - msg = "Cannot generate range" - with pytest.raises(OutOfBoundsDatetime, match=msg): - date_range("2016-01-01", periods=100000, freq="D") - with pytest.raises(OutOfBoundsDatetime, match=msg): - date_range(end="1763-10-12", periods=100000, freq="D") - - def test_date_range_gen_error(self): - rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min") - assert len(rng) == 4 - - @pytest.mark.parametrize("freq", ["AS", "YS"]) - def test_begin_year_alias(self, freq): - # see gh-9313 - rng = date_range("1/1/2013", "7/1/2017", freq=freq) - exp = DatetimeIndex( - ["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"], - freq=freq, - ) - tm.assert_index_equal(rng, exp) - - @pytest.mark.parametrize("freq", ["A", "Y"]) - def test_end_year_alias(self, freq): - # see gh-9313 - rng = date_range("1/1/2013", "7/1/2017", freq=freq) - exp = DatetimeIndex( - ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq - ) - tm.assert_index_equal(rng, exp) - - 
@pytest.mark.parametrize("freq", ["BA", "BY"]) - def test_business_end_year_alias(self, freq): - # see gh-9313 - rng = date_range("1/1/2013", "7/1/2017", freq=freq) - exp = DatetimeIndex( - ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq - ) - tm.assert_index_equal(rng, exp) - - def test_date_range_negative_freq(self): - # GH 11018 - rng = date_range("2011-12-31", freq="-2A", periods=3) - exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A") - tm.assert_index_equal(rng, exp) - assert rng.freq == "-2A" - - rng = date_range("2011-01-31", freq="-2M", periods=3) - exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M") - tm.assert_index_equal(rng, exp) - assert rng.freq == "-2M" - - def test_date_range_bms_bug(self): - # #1645 - rng = date_range("1/1/2000", periods=10, freq="BMS") - - ex_first = Timestamp("2000-01-03") - assert rng[0] == ex_first - - def test_date_range_normalize(self): - snap = datetime.today() - n = 50 - - rng = date_range(snap, periods=n, normalize=False, freq="2D") - - offset = timedelta(2) - values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset) - - tm.assert_index_equal(rng, values) - - rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B") - the_time = time(8, 15) - for val in rng: - assert val.time() == the_time - - def test_date_range_fy5252(self): - dr = date_range( - start="2013-01-01", - periods=2, - freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"), - ) - assert dr[0] == Timestamp("2013-01-31") - assert dr[1] == Timestamp("2014-01-30") - - def test_date_range_ambiguous_arguments(self): - # #2538 - start = datetime(2011, 1, 1, 5, 3, 40) - end = datetime(2011, 1, 1, 8, 9, 40) - - msg = ( - "Of the four parameters: start, end, periods, and " - "freq, exactly three must be specified" - ) - with pytest.raises(ValueError, match=msg): - date_range(start, end, periods=10, freq="s") - - def test_date_range_convenience_periods(self): - # GH 20808 - result = date_range("2018-04-24", "2018-04-27", periods=3) - expected = DatetimeIndex( - ["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"], - freq=None, - ) - - tm.assert_index_equal(result, expected) - - # Test if spacing remains linear if tz changes to dst in range - result = date_range( - "2018-04-01 01:00:00", - "2018-04-01 04:00:00", - tz="Australia/Sydney", - periods=3, - ) - expected = DatetimeIndex( - [ - Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"), - Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"), - Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"), - ] - ) - tm.assert_index_equal(result, expected) - - def test_date_range_index_comparison(self): - rng = date_range("2011-01-01", periods=3, tz="US/Eastern") - df = Series(rng).to_frame() - arr = np.array([rng.to_list()]).T - arr2 = np.array([rng]).T - - with pytest.raises(ValueError, match="Unable to coerce to Series"): - rng == df - - with pytest.raises(ValueError, match="Unable to coerce to Series"): - df == rng - - expected = DataFrame([True, True, True]) - - results = df == arr2 - tm.assert_frame_equal(results, expected) - - expected = Series([True, True, True], name=0) - - results = df[0] == arr2[:, 0] - tm.assert_series_equal(results, expected) - - expected = np.array( - [[True, False, False], [False, True, False], [False, False, True]] - ) - results = rng == arr - tm.assert_numpy_array_equal(results, expected) - - @pytest.mark.parametrize( - "start,end,result_tz", - [ - ["20180101", 
"20180103", "US/Eastern"], - [datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"], - [Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"], - [ - Timestamp("20180101", tz="US/Eastern"), - Timestamp("20180103", tz="US/Eastern"), - "US/Eastern", - ], - [ - Timestamp("20180101", tz="US/Eastern"), - Timestamp("20180103", tz="US/Eastern"), - None, - ], - ], - ) - def test_date_range_linspacing_tz(self, start, end, result_tz): - # GH 20983 - result = date_range(start, end, periods=3, tz=result_tz) - expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern") - tm.assert_index_equal(result, expected) - - def test_date_range_businesshour(self): - idx = DatetimeIndex( - [ - "2014-07-04 09:00", - "2014-07-04 10:00", - "2014-07-04 11:00", - "2014-07-04 12:00", - "2014-07-04 13:00", - "2014-07-04 14:00", - "2014-07-04 15:00", - "2014-07-04 16:00", - ], - freq="BH", - ) - rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH") - tm.assert_index_equal(idx, rng) - - idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH") - rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH") - tm.assert_index_equal(idx, rng) - - idx = DatetimeIndex( - [ - "2014-07-04 09:00", - "2014-07-04 10:00", - "2014-07-04 11:00", - "2014-07-04 12:00", - "2014-07-04 13:00", - "2014-07-04 14:00", - "2014-07-04 15:00", - "2014-07-04 16:00", - "2014-07-07 09:00", - "2014-07-07 10:00", - "2014-07-07 11:00", - "2014-07-07 12:00", - "2014-07-07 13:00", - "2014-07-07 14:00", - "2014-07-07 15:00", - "2014-07-07 16:00", - "2014-07-08 09:00", - "2014-07-08 10:00", - "2014-07-08 11:00", - "2014-07-08 12:00", - "2014-07-08 13:00", - "2014-07-08 14:00", - "2014-07-08 15:00", - "2014-07-08 16:00", - ], - freq="BH", - ) - rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH") - tm.assert_index_equal(idx, rng) - - def test_date_range_timedelta(self): - start = "2020-01-01" - end = "2020-01-11" - rng1 = date_range(start, end, freq="3D") - rng2 = date_range(start, end, freq=timedelta(days=3)) - tm.assert_index_equal(rng1, rng2) - - def test_range_misspecified(self): - # GH #1095 - msg = ( - "Of the four parameters: start, end, periods, and " - "freq, exactly three must be specified" - ) - - with pytest.raises(ValueError, match=msg): - date_range(start="1/1/2000") - - with pytest.raises(ValueError, match=msg): - date_range(end="1/1/2000") - - with pytest.raises(ValueError, match=msg): - date_range(periods=10) - - with pytest.raises(ValueError, match=msg): - date_range(start="1/1/2000", freq="H") - - with pytest.raises(ValueError, match=msg): - date_range(end="1/1/2000", freq="H") - - with pytest.raises(ValueError, match=msg): - date_range(periods=10, freq="H") - - with pytest.raises(ValueError, match=msg): - date_range() - - def test_compat_replace(self): - # https://github.com/statsmodels/statsmodels/issues/3349 - # replace should take ints/longs for compat - result = date_range(Timestamp("1960-04-01 00:00:00"), periods=76, freq="QS-JAN") - assert len(result) == 76 - - def test_catch_infinite_loop(self): - offset = offsets.DateOffset(minute=5) - # blow up, don't loop forever - msg = "Offset did not increment date" - with pytest.raises(ValueError, match=msg): - date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset) - - @pytest.mark.parametrize("periods", (1, 2)) - def test_wom_len(self, periods): - # https://github.com/pandas-dev/pandas/issues/20517 - res = date_range(start="20110101", periods=periods, freq="WOM-1MON") - assert len(res) == periods - - def 
test_construct_over_dst(self): - # GH 20854 - pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize( - "US/Pacific", ambiguous=True - ) - pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize( - "US/Pacific", ambiguous=False - ) - expect_data = [ - Timestamp("2010-11-07 00:00:00", tz="US/Pacific"), - pre_dst, - pst_dst, - ] - expected = DatetimeIndex(expect_data, freq="H") - result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific") - tm.assert_index_equal(result, expected) - - def test_construct_with_different_start_end_string_format(self): - # GH 12064 - result = date_range( - "2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H" - ) - expected = DatetimeIndex( - [ - Timestamp("2013-01-01 00:00:00+09:00"), - Timestamp("2013-01-01 01:00:00+09:00"), - Timestamp("2013-01-01 02:00:00+09:00"), - ], - freq="H", - ) - tm.assert_index_equal(result, expected) - - def test_error_with_zero_monthends(self): - msg = r"Offset <0 \* MonthEnds> did not increment date" - with pytest.raises(ValueError, match=msg): - date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0)) - - def test_range_bug(self): - # GH #770 - offset = DateOffset(months=3) - result = date_range("2011-1-1", "2012-1-31", freq=offset) - - start = datetime(2011, 1, 1) - expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset) - tm.assert_index_equal(result, expected) - - def test_range_tz_pytz(self): - # see gh-2906 - tz = timezone("US/Eastern") - start = tz.localize(datetime(2011, 1, 1)) - end = tz.localize(datetime(2011, 1, 3)) - - dr = date_range(start=start, periods=3) - assert dr.tz.zone == tz.zone - assert dr[0] == start - assert dr[2] == end - - dr = date_range(end=end, periods=3) - assert dr.tz.zone == tz.zone - assert dr[0] == start - assert dr[2] == end - - dr = date_range(start=start, end=end) - assert dr.tz.zone == tz.zone - assert dr[0] == start - assert dr[2] == end - - @pytest.mark.parametrize( - "start, end", - [ - [ - Timestamp(datetime(2014, 3, 6), tz="US/Eastern"), - Timestamp(datetime(2014, 3, 12), tz="US/Eastern"), - ], - [ - Timestamp(datetime(2013, 11, 1), tz="US/Eastern"), - Timestamp(datetime(2013, 11, 6), tz="US/Eastern"), - ], - ], - ) - def test_range_tz_dst_straddle_pytz(self, start, end): - dr = date_range(start, end, freq="D") - assert dr[0] == start - assert dr[-1] == end - assert np.all(dr.hour == 0) - - dr = date_range(start, end, freq="D", tz="US/Eastern") - assert dr[0] == start - assert dr[-1] == end - assert np.all(dr.hour == 0) - - dr = date_range( - start.replace(tzinfo=None), - end.replace(tzinfo=None), - freq="D", - tz="US/Eastern", - ) - assert dr[0] == start - assert dr[-1] == end - assert np.all(dr.hour == 0) - - def test_range_tz_dateutil(self): - # see gh-2906 - - # Use maybe_get_tz to fix filename in tz under dateutil. 
- from pandas._libs.tslibs.timezones import maybe_get_tz - - tz = lambda x: maybe_get_tz("dateutil/" + x) - - start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern")) - end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern")) - - dr = date_range(start=start, periods=3) - assert dr.tz == tz("US/Eastern") - assert dr[0] == start - assert dr[2] == end - - dr = date_range(end=end, periods=3) - assert dr.tz == tz("US/Eastern") - assert dr[0] == start - assert dr[2] == end - - dr = date_range(start=start, end=end) - assert dr.tz == tz("US/Eastern") - assert dr[0] == start - assert dr[2] == end - - @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"]) - def test_range_closed(self, freq, inclusive_endpoints_fixture): - begin = datetime(2011, 1, 1) - end = datetime(2014, 1, 1) - - result_range = date_range( - begin, end, inclusive=inclusive_endpoints_fixture, freq=freq - ) - both_range = date_range(begin, end, inclusive="both", freq=freq) - expected_range = _get_expected_range( - begin, end, both_range, inclusive_endpoints_fixture - ) - - tm.assert_index_equal(expected_range, result_range) - - @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"]) - def test_range_closed_with_tz_aware_start_end( - self, freq, inclusive_endpoints_fixture - ): - # GH12409, GH12684 - begin = Timestamp("2011/1/1", tz="US/Eastern") - end = Timestamp("2014/1/1", tz="US/Eastern") - - result_range = date_range( - begin, end, inclusive=inclusive_endpoints_fixture, freq=freq - ) - both_range = date_range(begin, end, inclusive="both", freq=freq) - expected_range = _get_expected_range( - begin, - end, - both_range, - inclusive_endpoints_fixture, - ) - - tm.assert_index_equal(expected_range, result_range) - - @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"]) - def test_range_with_tz_closed_with_tz_aware_start_end( - self, freq, inclusive_endpoints_fixture - ): - begin = Timestamp("2011/1/1") - end = Timestamp("2014/1/1") - begintz = Timestamp("2011/1/1", tz="US/Eastern") - endtz = Timestamp("2014/1/1", tz="US/Eastern") - - result_range = date_range( - begin, - end, - inclusive=inclusive_endpoints_fixture, - freq=freq, - tz="US/Eastern", - ) - both_range = date_range( - begin, end, inclusive="both", freq=freq, tz="US/Eastern" - ) - expected_range = _get_expected_range( - begintz, - endtz, - both_range, - inclusive_endpoints_fixture, - ) - - tm.assert_index_equal(expected_range, result_range) - - def test_range_closed_boundary(self, inclusive_endpoints_fixture): - # GH#11804 - right_boundary = date_range( - "2015-09-12", - "2015-12-01", - freq="QS-MAR", - inclusive=inclusive_endpoints_fixture, - ) - left_boundary = date_range( - "2015-09-01", - "2015-09-12", - freq="QS-MAR", - inclusive=inclusive_endpoints_fixture, - ) - both_boundary = date_range( - "2015-09-01", - "2015-12-01", - freq="QS-MAR", - inclusive=inclusive_endpoints_fixture, - ) - neither_boundary = date_range( - "2015-09-11", - "2015-09-12", - freq="QS-MAR", - inclusive=inclusive_endpoints_fixture, - ) - - expected_right = both_boundary - expected_left = both_boundary - expected_both = both_boundary - - if inclusive_endpoints_fixture == "right": - expected_left = both_boundary[1:] - elif inclusive_endpoints_fixture == "left": - expected_right = both_boundary[:-1] - elif inclusive_endpoints_fixture == "both": - expected_right = both_boundary[1:] - expected_left = both_boundary[:-1] - - expected_neither = both_boundary[1:-1] - - tm.assert_index_equal(right_boundary, expected_right) - tm.assert_index_equal(left_boundary, 
expected_left) - tm.assert_index_equal(both_boundary, expected_both) - tm.assert_index_equal(neither_boundary, expected_neither) - - def test_years_only(self): - # GH 6961 - dr = date_range("2014", "2015", freq="M") - assert dr[0] == datetime(2014, 1, 31) - assert dr[-1] == datetime(2014, 12, 31) - - def test_freq_divides_end_in_nanos(self): - # GH 10885 - result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345min") - result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345min") - expected_1 = DatetimeIndex( - ["2005-01-12 10:00:00", "2005-01-12 15:45:00"], - dtype="datetime64[ns]", - freq="345T", - tz=None, - ) - expected_2 = DatetimeIndex( - ["2005-01-13 10:00:00", "2005-01-13 15:45:00"], - dtype="datetime64[ns]", - freq="345T", - tz=None, - ) - tm.assert_index_equal(result_1, expected_1) - tm.assert_index_equal(result_2, expected_2) - - def test_cached_range_bug(self): - rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6)) - assert len(rng) == 50 - assert rng[0] == datetime(2010, 9, 1, 5) - - def test_timezone_comparison_bug(self): - # smoke test - start = Timestamp("20130220 10:00", tz="US/Eastern") - result = date_range(start, periods=2, tz="US/Eastern") - assert len(result) == 2 - - def test_timezone_comparison_assert(self): - start = Timestamp("20130220 10:00", tz="US/Eastern") - msg = "Inferred time zone not equal to passed time zone" - with pytest.raises(AssertionError, match=msg): - date_range(start, periods=2, tz="Europe/Berlin") - - def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture): - # GH 23270 - tz = tz_aware_fixture - result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz) - expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[ - ::-1 - ] - tm.assert_index_equal(result, expected) - - def test_range_where_start_equal_end(self, inclusive_endpoints_fixture): - # GH 43394 - start = "2021-09-02" - end = "2021-09-02" - result = date_range( - start=start, end=end, freq="D", inclusive=inclusive_endpoints_fixture - ) - - both_range = date_range(start=start, end=end, freq="D", inclusive="both") - if inclusive_endpoints_fixture == "neither": - expected = both_range[1:-1] - elif inclusive_endpoints_fixture in ("left", "right", "both"): - expected = both_range[:] - - tm.assert_index_equal(result, expected) - - def test_freq_dateoffset_with_relateivedelta_nanos(self): - # GH 46877 - freq = DateOffset(hours=10, days=57, nanoseconds=3) - result = date_range(end="1970-01-01 00:00:00", periods=10, freq=freq, name="a") - expected = DatetimeIndex( - [ - "1968-08-02T05:59:59.999999973", - "1968-09-28T15:59:59.999999976", - "1968-11-25T01:59:59.999999979", - "1969-01-21T11:59:59.999999982", - "1969-03-19T21:59:59.999999985", - "1969-05-16T07:59:59.999999988", - "1969-07-12T17:59:59.999999991", - "1969-09-08T03:59:59.999999994", - "1969-11-04T13:59:59.999999997", - "1970-01-01T00:00:00.000000000", - ], - name="a", - ) - tm.assert_index_equal(result, expected) - - -class TestDateRangeTZ: - """Tests for date_range with timezones""" - - def test_hongkong_tz_convert(self): - # GH#1673 smoke test - dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong") - - # it works! 
- dr.hour - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_date_range_span_dst_transition(self, tzstr): - # GH#1778 - - # Standard -> Daylight Savings Time - dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern") - - assert (dr.hour == 0).all() - - dr = date_range("2012-11-02", periods=10, tz=tzstr) - result = dr.hour - expected = pd.Index([0] * 10, dtype="int32") - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_date_range_timezone_str_argument(self, tzstr): - tz = timezones.maybe_get_tz(tzstr) - result = date_range("1/1/2000", periods=10, tz=tzstr) - expected = date_range("1/1/2000", periods=10, tz=tz) - - tm.assert_index_equal(result, expected) - - def test_date_range_with_fixedoffset_noname(self): - from pandas.tests.indexes.datetimes.test_timezones import fixed_off_no_name - - off = fixed_off_no_name - start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) - end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) - rng = date_range(start=start, end=end) - assert off == rng.tz - - idx = pd.Index([start, end]) - assert off == idx.tz - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_date_range_with_tz(self, tzstr): - stamp = Timestamp("3/11/2012 05:00", tz=tzstr) - assert stamp.hour == 5 - - rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr) - - assert stamp == rng[1] - - -class TestGenRangeGeneration: - def test_generate(self): - rng1 = list(generate_range(START, END, periods=None, offset=BDay(), unit="ns")) - rng2 = list(generate_range(START, END, periods=None, offset="B", unit="ns")) - assert rng1 == rng2 - - def test_generate_cday(self): - rng1 = list(generate_range(START, END, periods=None, offset=CDay(), unit="ns")) - rng2 = list(generate_range(START, END, periods=None, offset="C", unit="ns")) - assert rng1 == rng2 - - def test_1(self): - rng = list( - generate_range( - start=datetime(2009, 3, 25), - end=None, - periods=2, - offset=BDay(), - unit="ns", - ) - ) - expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)] - assert rng == expected - - def test_2(self): - rng = list( - generate_range( - start=datetime(2008, 1, 1), - end=datetime(2008, 1, 3), - periods=None, - offset=BDay(), - unit="ns", - ) - ) - expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)] - assert rng == expected - - def test_3(self): - rng = list( - generate_range( - start=datetime(2008, 1, 5), - end=datetime(2008, 1, 6), - periods=None, - offset=BDay(), - unit="ns", - ) - ) - expected = [] - assert rng == expected - - def test_precision_finer_than_offset(self): - # GH#9907 - result1 = date_range( - start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q" - ) - result2 = date_range( - start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W" - ) - expected1_list = [ - "2015-06-30 00:00:03", - "2015-09-30 00:00:03", - "2015-12-31 00:00:03", - "2016-03-31 00:00:03", - ] - expected2_list = [ - "2015-04-19 00:00:03", - "2015-04-26 00:00:03", - "2015-05-03 00:00:03", - "2015-05-10 00:00:03", - "2015-05-17 00:00:03", - "2015-05-24 00:00:03", - "2015-05-31 00:00:03", - "2015-06-07 00:00:03", - "2015-06-14 00:00:03", - "2015-06-21 00:00:03", - ] - expected1 = DatetimeIndex( - expected1_list, dtype="datetime64[ns]", freq="Q-DEC", tz=None - ) - expected2 = DatetimeIndex( - expected2_list, dtype="datetime64[ns]", freq="W-SUN", tz=None - ) - tm.assert_index_equal(result1, expected1) - 
tm.assert_index_equal(result2, expected2) - - dt1, dt2 = "2017-01-01", "2017-01-01" - tz1, tz2 = "US/Eastern", "Europe/London" - - @pytest.mark.parametrize( - "start,end", - [ - (Timestamp(dt1, tz=tz1), Timestamp(dt2)), - (Timestamp(dt1), Timestamp(dt2, tz=tz2)), - (Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)), - (Timestamp(dt1, tz=tz2), Timestamp(dt2, tz=tz1)), - ], - ) - def test_mismatching_tz_raises_err(self, start, end): - # issue 18488 - msg = "Start and end cannot both be tz-aware with different timezones" - with pytest.raises(TypeError, match=msg): - date_range(start, end) - with pytest.raises(TypeError, match=msg): - date_range(start, end, freq=BDay()) - - -class TestBusinessDateRange: - def test_constructor(self): - bdate_range(START, END, freq=BDay()) - bdate_range(START, periods=20, freq=BDay()) - bdate_range(end=START, periods=20, freq=BDay()) - - msg = "periods must be a number, got B" - with pytest.raises(TypeError, match=msg): - date_range("2011-1-1", "2012-1-1", "B") - - with pytest.raises(TypeError, match=msg): - bdate_range("2011-1-1", "2012-1-1", "B") - - msg = "freq must be specified for bdate_range; use date_range instead" - with pytest.raises(TypeError, match=msg): - bdate_range(START, END, periods=10, freq=None) - - def test_misc(self): - end = datetime(2009, 5, 13) - dr = bdate_range(end=end, periods=20) - firstDate = end - 19 * BDay() - - assert len(dr) == 20 - assert dr[0] == firstDate - assert dr[-1] == end - - def test_date_parse_failure(self): - badly_formed_date = "2007/100/1" - - msg = "Unknown datetime string format, unable to parse: 2007/100/1" - with pytest.raises(ValueError, match=msg): - Timestamp(badly_formed_date) - - with pytest.raises(ValueError, match=msg): - bdate_range(start=badly_formed_date, periods=10) - - with pytest.raises(ValueError, match=msg): - bdate_range(end=badly_formed_date, periods=10) - - with pytest.raises(ValueError, match=msg): - bdate_range(badly_formed_date, badly_formed_date) - - def test_daterange_bug_456(self): - # GH #456 - rng1 = bdate_range("12/5/2011", "12/5/2011") - rng2 = bdate_range("12/2/2011", "12/5/2011") - assert rng2._data.freq == BDay() - - result = rng1.union(rng2) - assert isinstance(result, DatetimeIndex) - - @pytest.mark.parametrize("inclusive", ["left", "right", "neither", "both"]) - def test_bdays_and_open_boundaries(self, inclusive): - # GH 6673 - start = "2018-07-21" # Saturday - end = "2018-07-29" # Sunday - result = date_range(start, end, freq="B", inclusive=inclusive) - - bday_start = "2018-07-23" # Monday - bday_end = "2018-07-27" # Friday - expected = date_range(bday_start, bday_end, freq="D") - tm.assert_index_equal(result, expected) - # Note: we do _not_ expect the freqs to match here - - def test_bday_near_overflow(self): - # GH#24252 avoid doing unnecessary addition that _would_ overflow - start = Timestamp.max.floor("D").to_pydatetime() - rng = date_range(start, end=None, periods=1, freq="B") - expected = DatetimeIndex([start], freq="B") - tm.assert_index_equal(rng, expected) - - def test_bday_overflow_error(self): - # GH#24252 check that we get OutOfBoundsDatetime and not OverflowError - msg = "Out of bounds nanosecond timestamp" - start = Timestamp.max.floor("D").to_pydatetime() - with pytest.raises(OutOfBoundsDatetime, match=msg): - date_range(start, periods=2, freq="B") - - -class TestCustomDateRange: - def test_constructor(self): - bdate_range(START, END, freq=CDay()) - bdate_range(START, periods=20, freq=CDay()) - bdate_range(end=START, periods=20, freq=CDay()) - - msg = "periods must 
be a number, got C" - with pytest.raises(TypeError, match=msg): - date_range("2011-1-1", "2012-1-1", "C") - - with pytest.raises(TypeError, match=msg): - bdate_range("2011-1-1", "2012-1-1", "C") - - def test_misc(self): - end = datetime(2009, 5, 13) - dr = bdate_range(end=end, periods=20, freq="C") - firstDate = end - 19 * CDay() - - assert len(dr) == 20 - assert dr[0] == firstDate - assert dr[-1] == end - - def test_daterange_bug_456(self): - # GH #456 - rng1 = bdate_range("12/5/2011", "12/5/2011", freq="C") - rng2 = bdate_range("12/2/2011", "12/5/2011", freq="C") - assert rng2._data.freq == CDay() - - result = rng1.union(rng2) - assert isinstance(result, DatetimeIndex) - - def test_cdaterange(self): - result = bdate_range("2013-05-01", periods=3, freq="C") - expected = DatetimeIndex(["2013-05-01", "2013-05-02", "2013-05-03"], freq="C") - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - def test_cdaterange_weekmask(self): - result = bdate_range( - "2013-05-01", periods=3, freq="C", weekmask="Sun Mon Tue Wed Thu" - ) - expected = DatetimeIndex( - ["2013-05-01", "2013-05-02", "2013-05-05"], freq=result.freq - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - # raise with non-custom freq - msg = ( - "a custom frequency string is required when holidays or " - "weekmask are passed, got frequency B" - ) - with pytest.raises(ValueError, match=msg): - bdate_range("2013-05-01", periods=3, weekmask="Sun Mon Tue Wed Thu") - - def test_cdaterange_holidays(self): - result = bdate_range("2013-05-01", periods=3, freq="C", holidays=["2013-05-01"]) - expected = DatetimeIndex( - ["2013-05-02", "2013-05-03", "2013-05-06"], freq=result.freq - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - # raise with non-custom freq - msg = ( - "a custom frequency string is required when holidays or " - "weekmask are passed, got frequency B" - ) - with pytest.raises(ValueError, match=msg): - bdate_range("2013-05-01", periods=3, holidays=["2013-05-01"]) - - def test_cdaterange_weekmask_and_holidays(self): - result = bdate_range( - "2013-05-01", - periods=3, - freq="C", - weekmask="Sun Mon Tue Wed Thu", - holidays=["2013-05-01"], - ) - expected = DatetimeIndex( - ["2013-05-02", "2013-05-05", "2013-05-06"], freq=result.freq - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - # raise with non-custom freq - msg = ( - "a custom frequency string is required when holidays or " - "weekmask are passed, got frequency B" - ) - with pytest.raises(ValueError, match=msg): - bdate_range( - "2013-05-01", - periods=3, - weekmask="Sun Mon Tue Wed Thu", - holidays=["2013-05-01"], - ) - - @pytest.mark.parametrize( - "freq", [freq for freq in prefix_mapping if freq.startswith("C")] - ) - def test_all_custom_freq(self, freq): - # should not raise - bdate_range( - START, END, freq=freq, weekmask="Mon Wed Fri", holidays=["2009-03-14"] - ) - - bad_freq = freq + "FOO" - msg = f"invalid custom frequency string: {bad_freq}" - with pytest.raises(ValueError, match=msg): - bdate_range(START, END, freq=bad_freq) - - @pytest.mark.parametrize( - "start_end", - [ - ("2018-01-01T00:00:01.000Z", "2018-01-03T00:00:01.000Z"), - ("2018-01-01T00:00:00.010Z", "2018-01-03T00:00:00.010Z"), - ("2001-01-01T00:00:00.010Z", "2001-01-03T00:00:00.010Z"), - ], - ) - def test_range_with_millisecond_resolution(self, start_end): - # https://github.com/pandas-dev/pandas/issues/24110 - start, end = start_end - result = 
date_range(start=start, end=end, periods=2, inclusive="left") - expected = DatetimeIndex([start]) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "start,period,expected", - [ - ("2022-07-23 00:00:00+02:00", 1, ["2022-07-25 00:00:00+02:00"]), - ("2022-07-22 00:00:00+02:00", 1, ["2022-07-22 00:00:00+02:00"]), - ( - "2022-07-22 00:00:00+02:00", - 2, - ["2022-07-22 00:00:00+02:00", "2022-07-25 00:00:00+02:00"], - ), - ], - ) - def test_range_with_timezone_and_custombusinessday(self, start, period, expected): - # GH49441 - result = date_range(start=start, periods=period, freq="C") - expected = DatetimeIndex(expected) - tm.assert_index_equal(result, expected) - - -def test_date_range_with_custom_holidays(): - # GH 30593 - freq = offsets.CustomBusinessHour(start="15:00", holidays=["2020-11-26"]) - result = date_range(start="2020-11-25 15:00", periods=4, freq=freq) - expected = DatetimeIndex( - [ - "2020-11-25 15:00:00", - "2020-11-25 16:00:00", - "2020-11-27 15:00:00", - "2020-11-27 16:00:00", - ], - freq=freq, - ) - tm.assert_index_equal(result, expected) - - -class TestDateRangeNonNano: - def test_date_range_reso_validation(self): - msg = "'unit' must be one of 's', 'ms', 'us', 'ns'" - with pytest.raises(ValueError, match=msg): - date_range("2016-01-01", "2016-03-04", periods=3, unit="h") - - def test_date_range_freq_higher_than_reso(self): - # freq being higher-resolution than reso is a problem - msg = "Use a lower freq or a higher unit instead" - with pytest.raises(ValueError, match=msg): - # # TODO give a more useful or informative message? - date_range("2016-01-01", "2016-01-02", freq="ns", unit="ms") - - def test_date_range_freq_matches_reso(self): - # GH#49106 matching reso is OK - dti = date_range("2016-01-01", "2016-01-01 00:00:01", freq="ms", unit="ms") - rng = np.arange(1_451_606_400_000, 1_451_606_401_001, dtype=np.int64) - expected = DatetimeIndex(rng.view("M8[ms]"), freq="ms") - tm.assert_index_equal(dti, expected) - - dti = date_range("2016-01-01", "2016-01-01 00:00:01", freq="us", unit="us") - rng = np.arange(1_451_606_400_000_000, 1_451_606_401_000_001, dtype=np.int64) - expected = DatetimeIndex(rng.view("M8[us]"), freq="us") - tm.assert_index_equal(dti, expected) - - dti = date_range("2016-01-01", "2016-01-01 00:00:00.001", freq="ns", unit="ns") - rng = np.arange( - 1_451_606_400_000_000_000, 1_451_606_400_001_000_001, dtype=np.int64 - ) - expected = DatetimeIndex(rng.view("M8[ns]"), freq="ns") - tm.assert_index_equal(dti, expected) - - def test_date_range_freq_lower_than_endpoints(self): - start = Timestamp("2022-10-19 11:50:44.719781") - end = Timestamp("2022-10-19 11:50:47.066458") - - # start and end cannot be cast to "s" unit without lossy rounding, - # so we do not allow this in date_range - with pytest.raises(ValueError, match="Cannot losslessly convert units"): - date_range(start, end, periods=3, unit="s") - - # but we can losslessly cast to "us" - dti = date_range(start, end, periods=2, unit="us") - rng = np.array( - [start.as_unit("us")._value, end.as_unit("us")._value], dtype=np.int64 - ) - expected = DatetimeIndex(rng.view("M8[us]")) - tm.assert_index_equal(dti, expected) - - def test_date_range_non_nano(self): - start = np.datetime64("1066-10-14") # Battle of Hastings - end = np.datetime64("2305-07-13") # Jean-Luc Picard's birthday - - dti = date_range(start, end, freq="D", unit="s") - assert dti.freq == "D" - assert dti.dtype == "M8[s]" - - exp = np.arange( - start.astype("M8[s]").view("i8"), - (end + 1).astype("M8[s]").view("i8"), - 24 * 
3600, - ).view("M8[s]") - - tm.assert_numpy_array_equal(dti.to_numpy(), exp) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_timezones.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_timezones.py deleted file mode 100644 index 09b06ecd5630d9d7e879d922c4566e7f692df1e1..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_timezones.py +++ /dev/null @@ -1,1214 +0,0 @@ -""" -Tests for DatetimeIndex timezone-related methods -""" -from datetime import ( - date, - datetime, - time, - timedelta, - timezone, - tzinfo, -) - -import dateutil -from dateutil.tz import ( - gettz, - tzlocal, -) -import numpy as np -import pytest -import pytz - -try: - from zoneinfo import ZoneInfo -except ImportError: - # Cannot assign to a type [misc] - ZoneInfo = None # type: ignore[misc, assignment] - -from pandas._libs.tslibs import ( - conversion, - timezones, -) -import pandas.util._test_decorators as td - -import pandas as pd -from pandas import ( - DatetimeIndex, - Index, - Timestamp, - bdate_range, - date_range, - isna, - to_datetime, -) -import pandas._testing as tm - - -class FixedOffset(tzinfo): - """Fixed offset in minutes east from UTC.""" - - def __init__(self, offset, name) -> None: - self.__offset = timedelta(minutes=offset) - self.__name = name - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return self.__name - - def dst(self, dt): - return timedelta(0) - - -fixed_off = FixedOffset(-420, "-07:00") -fixed_off_no_name = FixedOffset(-330, None) - - -class TestDatetimeIndexTimezones: - # ------------------------------------------------------------- - # DatetimeIndex.tz_convert - def test_tz_convert_nat(self): - # GH#5546 - dates = [pd.NaT] - idx = DatetimeIndex(dates) - idx = idx.tz_localize("US/Pacific") - tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) - idx = idx.tz_convert("US/Eastern") - tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern")) - idx = idx.tz_convert("UTC") - tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC")) - - dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT] - idx = DatetimeIndex(dates) - idx = idx.tz_localize("US/Pacific") - tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) - idx = idx.tz_convert("US/Eastern") - expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) - - idx = idx + pd.offsets.Hour(5) - expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) - idx = idx.tz_convert("US/Pacific") - expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) - - idx = idx + np.timedelta64(3, "h") - expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) - - idx = idx.tz_convert("US/Eastern") - expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) - - @pytest.mark.parametrize("prefix", ["", "dateutil/"]) - def test_dti_tz_convert_compat_timestamp(self, prefix): - strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] - idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern") - - conv = idx[0].tz_convert(prefix + "US/Pacific") - 
expected = idx.tz_convert(prefix + "US/Pacific")[0] - - assert conv == expected - - def test_dti_tz_convert_hour_overflow_dst(self): - # Regression test for: - # https://github.com/pandas-dev/pandas/issues/13306 - - # sorted case US/Eastern -> UTC - ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"] - tt = DatetimeIndex(ts).tz_localize("US/Eastern") - ut = tt.tz_convert("UTC") - expected = Index([13, 14, 13], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # sorted case UTC -> US/Eastern - ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"] - tt = DatetimeIndex(ts).tz_localize("UTC") - ut = tt.tz_convert("US/Eastern") - expected = Index([9, 9, 9], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case US/Eastern -> UTC - ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"] - tt = DatetimeIndex(ts).tz_localize("US/Eastern") - ut = tt.tz_convert("UTC") - expected = Index([13, 14, 13], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case UTC -> US/Eastern - ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"] - tt = DatetimeIndex(ts).tz_localize("UTC") - ut = tt.tz_convert("US/Eastern") - expected = Index([9, 9, 9], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz): - # Regression test for GH#13306 - - # sorted case US/Eastern -> UTC - ts = [ - Timestamp("2008-05-12 09:50:00", tz=tz), - Timestamp("2008-12-12 09:50:35", tz=tz), - Timestamp("2009-05-12 09:50:32", tz=tz), - ] - tt = DatetimeIndex(ts) - ut = tt.tz_convert("UTC") - expected = Index([13, 14, 13], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # sorted case UTC -> US/Eastern - ts = [ - Timestamp("2008-05-12 13:50:00", tz="UTC"), - Timestamp("2008-12-12 14:50:35", tz="UTC"), - Timestamp("2009-05-12 13:50:32", tz="UTC"), - ] - tt = DatetimeIndex(ts) - ut = tt.tz_convert("US/Eastern") - expected = Index([9, 9, 9], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case US/Eastern -> UTC - ts = [ - Timestamp("2008-05-12 09:50:00", tz=tz), - Timestamp("2008-12-12 09:50:35", tz=tz), - Timestamp("2008-05-12 09:50:32", tz=tz), - ] - tt = DatetimeIndex(ts) - ut = tt.tz_convert("UTC") - expected = Index([13, 14, 13], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case UTC -> US/Eastern - ts = [ - Timestamp("2008-05-12 13:50:00", tz="UTC"), - Timestamp("2008-12-12 14:50:35", tz="UTC"), - Timestamp("2008-05-12 13:50:32", tz="UTC"), - ] - tt = DatetimeIndex(ts) - ut = tt.tz_convert("US/Eastern") - expected = Index([9, 9, 9], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - @pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)]) - def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n): - # Regression test for tslib.tz_convert(vals, tz1, tz2). - # See https://github.com/pandas-dev/pandas/issues/4496 for details. 
- idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq) - idx = idx.tz_localize("UTC") - idx = idx.tz_convert("Europe/Moscow") - - expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1])) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - def test_dti_tz_convert_dst(self): - for freq, n in [("H", 1), ("T", 60), ("S", 3600)]: - # Start DST - idx = date_range( - "2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC" - ) - idx = idx.tz_convert("US/Eastern") - expected = np.repeat( - np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]), - np.array([n, n, n, n, n, n, n, n, n, n, 1]), - ) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - idx = date_range( - "2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern" - ) - idx = idx.tz_convert("UTC") - expected = np.repeat( - np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), - np.array([n, n, n, n, n, n, n, n, n, n, 1]), - ) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - # End DST - idx = date_range( - "2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC" - ) - idx = idx.tz_convert("US/Eastern") - expected = np.repeat( - np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]), - np.array([n, n, n, n, n, n, n, n, n, n, 1]), - ) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - idx = date_range( - "2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern" - ) - idx = idx.tz_convert("UTC") - expected = np.repeat( - np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), - np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]), - ) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - # daily - # Start DST - idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC") - idx = idx.tz_convert("US/Eastern") - tm.assert_index_equal(idx.hour, Index([19, 19], dtype=np.int32)) - - idx = date_range( - "2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern" - ) - idx = idx.tz_convert("UTC") - tm.assert_index_equal(idx.hour, Index([5, 5], dtype=np.int32)) - - # End DST - idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC") - idx = idx.tz_convert("US/Eastern") - tm.assert_index_equal(idx.hour, Index([20, 20], dtype=np.int32)) - - idx = date_range( - "2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern" - ) - idx = idx.tz_convert("UTC") - tm.assert_index_equal(idx.hour, Index([4, 4], dtype=np.int32)) - - def test_tz_convert_roundtrip(self, tz_aware_fixture): - tz = tz_aware_fixture - idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC") - exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M") - - idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC") - exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D") - - idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC") - exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H") - - idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC") - exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T") - - for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]: - converted = idx.tz_convert(tz) - reset = converted.tz_convert(None) - tm.assert_index_equal(reset, expected) - assert reset.tzinfo is None - expected = converted.tz_convert("UTC").tz_localize(None) - expected = expected._with_freq("infer") - tm.assert_index_equal(reset, expected) - - def test_dti_tz_convert_tzlocal(self): 
- # GH#13583 - # tz_convert doesn't affect to internal - dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC") - dti2 = dti.tz_convert(dateutil.tz.tzlocal()) - tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) - - dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal()) - dti2 = dti.tz_convert(None) - tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) - - @pytest.mark.parametrize( - "tz", - [ - "US/Eastern", - "dateutil/US/Eastern", - pytz.timezone("US/Eastern"), - gettz("US/Eastern"), - ], - ) - def test_dti_tz_convert_utc_to_local_no_modify(self, tz): - rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc") - rng_eastern = rng.tz_convert(tz) - - # Values are unmodified - tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) - - assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz)) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_tz_convert_unsorted(self, tzstr): - dr = date_range("2012-03-09", freq="H", periods=100, tz="utc") - dr = dr.tz_convert(tzstr) - - result = dr[::-1].hour - exp = dr.hour[::-1] - tm.assert_almost_equal(result, exp) - - # ------------------------------------------------------------- - # DatetimeIndex.tz_localize - - def test_tz_localize_utc_copies(self, utc_fixture): - # GH#46460 - times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"] - index = DatetimeIndex(times) - - res = index.tz_localize(utc_fixture) - assert not tm.shares_memory(res, index) - - res2 = index._data.tz_localize(utc_fixture) - assert not tm.shares_memory(index._data, res2) - - def test_dti_tz_localize_nonexistent_raise_coerce(self): - # GH#13057 - times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"] - index = DatetimeIndex(times) - tz = "US/Eastern" - with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): - index.tz_localize(tz=tz) - - with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): - index.tz_localize(tz=tz, nonexistent="raise") - - result = index.tz_localize(tz=tz, nonexistent="NaT") - test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"] - dti = to_datetime(test_times, utc=True) - expected = dti.tz_convert("US/Eastern") - tm.assert_index_equal(result, expected) - - easts = [pytz.timezone("US/Eastern"), gettz("US/Eastern")] - if ZoneInfo is not None: - try: - tz = ZoneInfo("US/Eastern") - except KeyError: - # no tzdata - pass - else: - easts.append(tz) - - @pytest.mark.parametrize("tz", easts) - def test_dti_tz_localize_ambiguous_infer(self, tz): - # November 6, 2011, fall back, repeat 2 AM hour - # With no repeated hours, we cannot infer the transition - dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour()) - with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): - dr.tz_localize(tz) - - # With repeated hours, we can infer the transition - dr = date_range( - datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz - ) - times = [ - "11/06/2011 00:00", - "11/06/2011 01:00", - "11/06/2011 01:00", - "11/06/2011 02:00", - "11/06/2011 03:00", - ] - di = DatetimeIndex(times) - localized = di.tz_localize(tz, ambiguous="infer") - expected = dr._with_freq(None) - tm.assert_index_equal(expected, localized) - tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer")) - - # When there is no dst transition, nothing special happens - dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour()) - localized = dr.tz_localize(tz) - 
localized_infer = dr.tz_localize(tz, ambiguous="infer") - tm.assert_index_equal(localized, localized_infer) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) - def test_dti_tz_localize_ambiguous_times(self, tz): - # March 13, 2011, spring forward, skip from 2 AM to 3 AM - dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour()) - with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"): - dr.tz_localize(tz) - - # after dst transition, it works - dr = date_range( - datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz - ) - - # November 6, 2011, fall back, repeat 2 AM hour - dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour()) - with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): - dr.tz_localize(tz) - - # UTC is OK - dr = date_range( - datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc - ) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_tz_localize_pass_dates_to_utc(self, tzstr): - strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] - - idx = DatetimeIndex(strdates) - conv = idx.tz_localize(tzstr) - - fromdates = DatetimeIndex(strdates, tz=tzstr) - - assert conv.tz == fromdates.tz - tm.assert_numpy_array_equal(conv.values, fromdates.values) - - @pytest.mark.parametrize("prefix", ["", "dateutil/"]) - def test_dti_tz_localize(self, prefix): - tzstr = prefix + "US/Eastern" - dti = date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L") - dti2 = dti.tz_localize(tzstr) - - dti_utc = date_range( - start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc" - ) - - tm.assert_numpy_array_equal(dti2.values, dti_utc.values) - - dti3 = dti2.tz_convert(prefix + "US/Pacific") - tm.assert_numpy_array_equal(dti3.values, dti_utc.values) - - dti = date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L") - with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): - dti.tz_localize(tzstr) - - dti = date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L") - with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"): - dti.tz_localize(tzstr) - - @pytest.mark.parametrize( - "tz", - [ - "US/Eastern", - "dateutil/US/Eastern", - pytz.timezone("US/Eastern"), - gettz("US/Eastern"), - ], - ) - def test_dti_tz_localize_utc_conversion(self, tz): - # Localizing to time zone should: - # 1) check for DST ambiguities - # 2) convert to UTC - - rng = date_range("3/10/2012", "3/11/2012", freq="30T") - - converted = rng.tz_localize(tz) - expected_naive = rng + pd.offsets.Hour(5) - tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) - - # DST ambiguity, this should fail - rng = date_range("3/11/2012", "3/12/2012", freq="30T") - # Is this really how it should fail?? - with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"): - rng.tz_localize(tz) - - def test_dti_tz_localize_roundtrip(self, tz_aware_fixture): - # note: this tz tests that a tz-naive index can be localized - # and de-localized successfully, when there are no DST transitions - # in the range. 
- idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T") - tz = tz_aware_fixture - localized = idx.tz_localize(tz) - # can't localize a tz-aware object - with pytest.raises( - TypeError, match="Already tz-aware, use tz_convert to convert" - ): - localized.tz_localize(tz) - reset = localized.tz_localize(None) - assert reset.tzinfo is None - expected = idx._with_freq(None) - tm.assert_index_equal(reset, expected) - - def test_dti_tz_localize_naive(self): - rng = date_range("1/1/2011", periods=100, freq="H") - - conv = rng.tz_localize("US/Pacific") - exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific") - - tm.assert_index_equal(conv, exp._with_freq(None)) - - def test_dti_tz_localize_tzlocal(self): - # GH#13583 - offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) - offset = int(offset.total_seconds() * 1000000000) - - dti = date_range(start="2001-01-01", end="2001-03-01") - dti2 = dti.tz_localize(dateutil.tz.tzlocal()) - tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8) - - dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal()) - dti2 = dti.tz_localize(None) - tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) - def test_dti_tz_localize_ambiguous_nat(self, tz): - times = [ - "11/06/2011 00:00", - "11/06/2011 01:00", - "11/06/2011 01:00", - "11/06/2011 02:00", - "11/06/2011 03:00", - ] - di = DatetimeIndex(times) - localized = di.tz_localize(tz, ambiguous="NaT") - - times = [ - "11/06/2011 00:00", - np.nan, - np.nan, - "11/06/2011 02:00", - "11/06/2011 03:00", - ] - di_test = DatetimeIndex(times, tz="US/Eastern") - - # left dtype is datetime64[ns, US/Eastern] - # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')] - tm.assert_numpy_array_equal(di_test.values, localized.values) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) - def test_dti_tz_localize_ambiguous_flags(self, tz): - # November 6, 2011, fall back, repeat 2 AM hour - - # Pass in flags to determine right dst transition - dr = date_range( - datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz - ) - times = [ - "11/06/2011 00:00", - "11/06/2011 01:00", - "11/06/2011 01:00", - "11/06/2011 02:00", - "11/06/2011 03:00", - ] - - # Test tz_localize - di = DatetimeIndex(times) - is_dst = [1, 1, 0, 0, 0] - localized = di.tz_localize(tz, ambiguous=is_dst) - expected = dr._with_freq(None) - tm.assert_index_equal(expected, localized) - tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst)) - - localized = di.tz_localize(tz, ambiguous=np.array(is_dst)) - tm.assert_index_equal(dr, localized) - - localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool")) - tm.assert_index_equal(dr, localized) - - # Test constructor - localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst) - tm.assert_index_equal(dr, localized) - - # Test duplicate times where inferring the dst fails - times += times - di = DatetimeIndex(times) - - # When the sizes are incompatible, make sure error is raised - msg = "Length of ambiguous bool-array must be the same size as vals" - with pytest.raises(Exception, match=msg): - di.tz_localize(tz, ambiguous=is_dst) - - # When sizes are compatible and there are repeats ('infer' won't work) - is_dst = np.hstack((is_dst, is_dst)) - localized = di.tz_localize(tz, ambiguous=is_dst) - dr = dr.append(dr) - tm.assert_index_equal(dr, localized) - - # When there is no dst 
transition, nothing special happens - dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour()) - is_dst = np.array([1] * 10) - localized = dr.tz_localize(tz) - localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst) - tm.assert_index_equal(localized, localized_is_dst) - - # TODO: belongs outside tz_localize tests? - @pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"]) - def test_dti_construction_ambiguous_endpoint(self, tz): - # construction with an ambiguous end-point - # GH#11626 - - with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): - date_range( - "2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="H" - ) - - times = date_range( - "2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer" - ) - assert times[0] == Timestamp("2013-10-26 23:00", tz=tz) - assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz) - - @pytest.mark.parametrize( - "tz, option, expected", - [ - ["US/Pacific", "shift_forward", "2019-03-10 03:00"], - ["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"], - ["US/Pacific", "shift_backward", "2019-03-10 01:00"], - ["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"], - ["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"], - ], - ) - def test_dti_construction_nonexistent_endpoint(self, tz, option, expected): - # construction with an nonexistent end-point - - with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"): - date_range( - "2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="H" - ) - - times = date_range( - "2019-03-10 00:00", "2019-03-10 02:00", freq="H", tz=tz, nonexistent=option - ) - assert times[-1] == Timestamp(expected, tz=tz) - - def test_dti_tz_localize_bdate_range(self): - dr = bdate_range("1/1/2009", "1/1/2010") - dr_utc = bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc) - localized = dr.tz_localize(pytz.utc) - tm.assert_index_equal(dr_utc, localized) - - @pytest.mark.parametrize( - "start_ts, tz, end_ts, shift", - [ - ["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"], - [ - "2015-03-29 02:20:00", - "Europe/Warsaw", - "2015-03-29 01:59:59.999999999", - "backward", - ], - [ - "2015-03-29 02:20:00", - "Europe/Warsaw", - "2015-03-29 03:20:00", - timedelta(hours=1), - ], - [ - "2015-03-29 02:20:00", - "Europe/Warsaw", - "2015-03-29 01:20:00", - timedelta(hours=-1), - ], - ["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"], - [ - "2018-03-11 02:33:00", - "US/Pacific", - "2018-03-11 01:59:59.999999999", - "backward", - ], - [ - "2018-03-11 02:33:00", - "US/Pacific", - "2018-03-11 03:33:00", - timedelta(hours=1), - ], - [ - "2018-03-11 02:33:00", - "US/Pacific", - "2018-03-11 01:33:00", - timedelta(hours=-1), - ], - ], - ) - @pytest.mark.parametrize("tz_type", ["", "dateutil/"]) - def test_dti_tz_localize_nonexistent_shift( - self, start_ts, tz, end_ts, shift, tz_type - ): - # GH 8917 - tz = tz_type + tz - if isinstance(shift, str): - shift = "shift_" + shift - dti = DatetimeIndex([Timestamp(start_ts)]) - result = dti.tz_localize(tz, nonexistent=shift) - expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize("offset", [-1, 1]) - def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, warsaw): - # GH 8917 - tz = warsaw - dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")]) - msg = "The provided timedelta will relocalize on a nonexistent time" - with pytest.raises(ValueError, 
match=msg): - dti.tz_localize(tz, nonexistent=timedelta(seconds=offset)) - - # ------------------------------------------------------------- - # DatetimeIndex.normalize - - def test_normalize_tz(self): - rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern") - - result = rng.normalize() # does not preserve freq - expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern") - tm.assert_index_equal(result, expected._with_freq(None)) - - assert result.is_normalized - assert not rng.is_normalized - - rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC") - - result = rng.normalize() - expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC") - tm.assert_index_equal(result, expected) - - assert result.is_normalized - assert not rng.is_normalized - - rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) - result = rng.normalize() # does not preserve freq - expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) - tm.assert_index_equal(result, expected._with_freq(None)) - - assert result.is_normalized - assert not rng.is_normalized - - @td.skip_if_windows - @pytest.mark.parametrize( - "timezone", - [ - "US/Pacific", - "US/Eastern", - "UTC", - "Asia/Kolkata", - "Asia/Shanghai", - "Australia/Canberra", - ], - ) - def test_normalize_tz_local(self, timezone): - # GH#13459 - with tm.set_timezone(timezone): - rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) - - result = rng.normalize() - expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) - expected = expected._with_freq(None) - tm.assert_index_equal(result, expected) - - assert result.is_normalized - assert not rng.is_normalized - - # ------------------------------------------------------------ - # DatetimeIndex.__new__ - - @pytest.mark.parametrize("prefix", ["", "dateutil/"]) - def test_dti_constructor_static_tzinfo(self, prefix): - # it works! 
- index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST") - index.hour - index[0] - - def test_dti_constructor_with_fixed_tz(self): - off = FixedOffset(420, "+07:00") - start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) - end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) - rng = date_range(start=start, end=end) - assert off == rng.tz - - rng2 = date_range(start, periods=len(rng), tz=off) - tm.assert_index_equal(rng, rng2) - - rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00") - assert (rng.values == rng3.values).all() - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_convert_datetime_list(self, tzstr): - dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo") - dr2 = DatetimeIndex(list(dr), name="foo", freq="D") - tm.assert_index_equal(dr, dr2) - - def test_dti_construction_univalent(self): - rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern") - rng2 = DatetimeIndex(data=rng, tz="US/Eastern") - tm.assert_index_equal(rng, rng2) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) - def test_dti_from_tzaware_datetime(self, tz): - d = [datetime(2012, 8, 19, tzinfo=tz)] - - index = DatetimeIndex(d) - assert timezones.tz_compare(index.tz, tz) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_tz_constructors(self, tzstr): - """Test different DatetimeIndex constructions with timezone - Follow-up of GH#4229 - """ - arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"] - - idx1 = to_datetime(arr).tz_localize(tzstr) - idx2 = date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr) - idx2 = idx2._with_freq(None) # the others all have freq=None - idx3 = DatetimeIndex(arr, tz=tzstr) - idx4 = DatetimeIndex(np.array(arr), tz=tzstr) - - for other in [idx2, idx3, idx4]: - tm.assert_index_equal(idx1, other) - - # ------------------------------------------------------------- - # Unsorted - - @pytest.mark.parametrize( - "dtype", - [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], - ) - def test_date_accessor(self, dtype): - # Regression test for GH#21230 - expected = np.array([date(2018, 6, 4), pd.NaT]) - - index = DatetimeIndex(["2018-06-04 10:00:00", pd.NaT], dtype=dtype) - result = index.date - - tm.assert_numpy_array_equal(result, expected) - - @pytest.mark.parametrize( - "dtype", - [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], - ) - def test_time_accessor(self, dtype): - # Regression test for GH#21267 - expected = np.array([time(10, 20, 30), pd.NaT]) - - index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], dtype=dtype) - result = index.time - - tm.assert_numpy_array_equal(result, expected) - - def test_timetz_accessor(self, tz_naive_fixture): - # GH21358 - tz = timezones.maybe_get_tz(tz_naive_fixture) - - expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT]) - - index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], tz=tz) - result = index.timetz - - tm.assert_numpy_array_equal(result, expected) - - def test_dti_drop_dont_lose_tz(self): - # GH#2621 - ind = date_range("2012-12-01", periods=10, tz="utc") - ind = ind.drop(ind[-1]) - - assert ind.tz is not None - - def test_dti_tz_conversion_freq(self, tz_naive_fixture): - # GH25241 - t3 = DatetimeIndex(["2019-01-01 10:00"], freq="H") - assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq - t4 = DatetimeIndex(["2019-01-02 12:00"], tz="UTC", freq="T") - assert t4.tz_convert(tz="UTC").freq == 
t4.freq - - def test_drop_dst_boundary(self): - # see gh-18031 - tz = "Europe/Brussels" - freq = "15min" - - start = Timestamp("201710290100", tz=tz) - end = Timestamp("201710290300", tz=tz) - index = date_range(start=start, end=end, freq=freq) - - expected = DatetimeIndex( - [ - "201710290115", - "201710290130", - "201710290145", - "201710290200", - "201710290215", - "201710290230", - "201710290245", - "201710290200", - "201710290215", - "201710290230", - "201710290245", - "201710290300", - ], - tz=tz, - freq=freq, - ambiguous=[ - True, - True, - True, - True, - True, - True, - True, - False, - False, - False, - False, - False, - ], - ) - result = index.drop(index[0]) - tm.assert_index_equal(result, expected) - - def test_date_range_localize(self): - rng = date_range("3/11/2012 03:00", periods=15, freq="H", tz="US/Eastern") - rng2 = DatetimeIndex(["3/11/2012 03:00", "3/11/2012 04:00"], tz="US/Eastern") - rng3 = date_range("3/11/2012 03:00", periods=15, freq="H") - rng3 = rng3.tz_localize("US/Eastern") - - tm.assert_index_equal(rng._with_freq(None), rng3) - - # DST transition time - val = rng[0] - exp = Timestamp("3/11/2012 03:00", tz="US/Eastern") - - assert val.hour == 3 - assert exp.hour == 3 - assert val == exp # same UTC value - tm.assert_index_equal(rng[:2], rng2) - - # Right before the DST transition - rng = date_range("3/11/2012 00:00", periods=2, freq="H", tz="US/Eastern") - rng2 = DatetimeIndex( - ["3/11/2012 00:00", "3/11/2012 01:00"], tz="US/Eastern", freq="H" - ) - tm.assert_index_equal(rng, rng2) - exp = Timestamp("3/11/2012 00:00", tz="US/Eastern") - assert exp.hour == 0 - assert rng[0] == exp - exp = Timestamp("3/11/2012 01:00", tz="US/Eastern") - assert exp.hour == 1 - assert rng[1] == exp - - rng = date_range("3/11/2012 00:00", periods=10, freq="H", tz="US/Eastern") - assert rng[2].hour == 3 - - def test_timestamp_equality_different_timezones(self): - utc_range = date_range("1/1/2000", periods=20, tz="UTC") - eastern_range = utc_range.tz_convert("US/Eastern") - berlin_range = utc_range.tz_convert("Europe/Berlin") - - for a, b, c in zip(utc_range, eastern_range, berlin_range): - assert a == b - assert b == c - assert a == c - - assert (utc_range == eastern_range).all() - assert (utc_range == berlin_range).all() - assert (berlin_range == eastern_range).all() - - def test_dti_intersection(self): - rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") - - left = rng[10:90][::-1] - right = rng[20:80][::-1] - - assert left.tz == rng.tz - result = left.intersection(right) - assert result.tz == left.tz - - def test_dti_equals_with_tz(self): - left = date_range("1/1/2011", periods=100, freq="H", tz="utc") - right = date_range("1/1/2011", periods=100, freq="H", tz="US/Eastern") - - assert not left.equals(right) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_tz_nat(self, tzstr): - idx = DatetimeIndex([Timestamp("2013-1-1", tz=tzstr), pd.NaT]) - - assert isna(idx[1]) - assert idx[0].tzinfo is not None - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_astype_asobject_tzinfos(self, tzstr): - # GH#1345 - - # dates around a dst transition - rng = date_range("2/13/2010", "5/6/2010", tz=tzstr) - - objs = rng.astype(object) - for i, x in enumerate(objs): - exval = rng[i] - assert x == exval - assert x.tzinfo == exval.tzinfo - - objs = rng.astype(object) - for i, x in enumerate(objs): - exval = rng[i] - assert x == exval - assert x.tzinfo == exval.tzinfo - - @pytest.mark.parametrize("tzstr", 
["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_with_timezone_repr(self, tzstr): - rng = date_range("4/13/2010", "5/6/2010") - - rng_eastern = rng.tz_localize(tzstr) - - rng_repr = repr(rng_eastern) - assert "2010-04-13 00:00:00" in rng_repr - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_take_dont_lose_meta(self, tzstr): - rng = date_range("1/1/2000", periods=20, tz=tzstr) - - result = rng.take(range(5)) - assert result.tz == rng.tz - assert result.freq == rng.freq - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_utc_box_timestamp_and_localize(self, tzstr): - tz = timezones.maybe_get_tz(tzstr) - - rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc") - rng_eastern = rng.tz_convert(tzstr) - - expected = rng[-1].astimezone(tz) - - stamp = rng_eastern[-1] - assert stamp == expected - assert stamp.tzinfo == expected.tzinfo - - # right tzinfo - rng = date_range("3/13/2012", "3/14/2012", freq="H", tz="utc") - rng_eastern = rng.tz_convert(tzstr) - # test not valid for dateutil timezones. - # assert 'EDT' in repr(rng_eastern[0].tzinfo) - assert "EDT" in repr(rng_eastern[0].tzinfo) or "tzfile" in repr( - rng_eastern[0].tzinfo - ) - - def test_dti_to_pydatetime(self): - dt = dateutil.parser.parse("2012-06-13T01:39:00Z") - dt = dt.replace(tzinfo=tzlocal()) - - arr = np.array([dt], dtype=object) - - result = to_datetime(arr, utc=True) - assert result.tz is timezone.utc - - rng = date_range("2012-11-03 03:00", "2012-11-05 03:00", tz=tzlocal()) - arr = rng.to_pydatetime() - result = to_datetime(arr, utc=True) - assert result.tz is timezone.utc - - def test_dti_to_pydatetime_fizedtz(self): - dates = np.array( - [ - datetime(2000, 1, 1, tzinfo=fixed_off), - datetime(2000, 1, 2, tzinfo=fixed_off), - datetime(2000, 1, 3, tzinfo=fixed_off), - ] - ) - dti = DatetimeIndex(dates) - - result = dti.to_pydatetime() - tm.assert_numpy_array_equal(dates, result) - - result = dti._mpl_repr() - tm.assert_numpy_array_equal(dates, result) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Central"), gettz("US/Central")]) - def test_with_tz(self, tz): - # just want it to work - start = datetime(2011, 3, 12, tzinfo=pytz.utc) - dr = bdate_range(start, periods=50, freq=pd.offsets.Hour()) - assert dr.tz is pytz.utc - - # DateRange with naive datetimes - dr = bdate_range("1/1/2005", "1/1/2009", tz=pytz.utc) - dr = bdate_range("1/1/2005", "1/1/2009", tz=tz) - - # normalized - central = dr.tz_convert(tz) - assert central.tz is tz - naive = central[0].to_pydatetime().replace(tzinfo=None) - comp = conversion.localize_pydatetime(naive, tz).tzinfo - assert central[0].tz is comp - - # compare vs a localized tz - naive = dr[0].to_pydatetime().replace(tzinfo=None) - comp = conversion.localize_pydatetime(naive, tz).tzinfo - assert central[0].tz is comp - - # datetimes with tzinfo set - dr = bdate_range( - datetime(2005, 1, 1, tzinfo=pytz.utc), datetime(2009, 1, 1, tzinfo=pytz.utc) - ) - msg = "Start and end cannot both be tz-aware with different timezones" - with pytest.raises(Exception, match=msg): - bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), "1/1/2009", tz=tz) - - @pytest.mark.parametrize("prefix", ["", "dateutil/"]) - def test_field_access_localize(self, prefix): - strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] - rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern") - assert (rng.hour == 0).all() - - # a more unusual time zone, #1946 - dr = date_range( - "2011-10-02 00:00", freq="h", periods=10, tz=prefix + 
"America/Atikokan" - ) - - expected = Index(np.arange(10, dtype=np.int32)) - tm.assert_index_equal(dr.hour, expected) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) - def test_dti_convert_tz_aware_datetime_datetime(self, tz): - # GH#1581 - dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)] - - dates_aware = [conversion.localize_pydatetime(x, tz) for x in dates] - result = DatetimeIndex(dates_aware) - assert timezones.tz_compare(result.tz, tz) - - converted = to_datetime(dates_aware, utc=True) - ex_vals = np.array([Timestamp(x).as_unit("ns")._value for x in dates_aware]) - tm.assert_numpy_array_equal(converted.asi8, ex_vals) - assert converted.tz is timezone.utc - - # Note: not difference, as there is no symmetry requirement there - @pytest.mark.parametrize("setop", ["union", "intersection", "symmetric_difference"]) - def test_dti_setop_aware(self, setop): - # non-overlapping - # GH#39328 as of 2.0 we cast these to UTC instead of object - rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", tz="US/Central") - - rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", tz="US/Eastern") - - result = getattr(rng, setop)(rng2) - - left = rng.tz_convert("UTC") - right = rng2.tz_convert("UTC") - expected = getattr(left, setop)(right) - tm.assert_index_equal(result, expected) - assert result.tz == left.tz - if len(result): - assert result[0].tz is timezone.utc - assert result[-1].tz is timezone.utc - - def test_dti_union_mixed(self): - # GH 21671 - rng = DatetimeIndex([Timestamp("2011-01-01"), pd.NaT]) - rng2 = DatetimeIndex(["2012-01-01", "2012-01-02"], tz="Asia/Tokyo") - result = rng.union(rng2) - expected = Index( - [ - Timestamp("2011-01-01"), - pd.NaT, - Timestamp("2012-01-01", tz="Asia/Tokyo"), - Timestamp("2012-01-02", tz="Asia/Tokyo"), - ], - dtype=object, - ) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)] - ) - def test_iteration_preserves_nanoseconds(self, tz): - # GH 19603 - index = DatetimeIndex( - ["2018-02-08 15:00:00.168456358", "2018-02-08 15:00:00.168456359"], tz=tz - ) - for i, ts in enumerate(index): - assert ts == index[i] # pylint: disable=unnecessary-list-index-lookup - - -def test_tz_localize_invalidates_freq(): - # we only preserve freq in unambiguous cases - - # if localized to US/Eastern, this crosses a DST transition - dti = date_range("2014-03-08 23:00", "2014-03-09 09:00", freq="H") - assert dti.freq == "H" - - result = dti.tz_localize(None) # no-op - assert result.freq == "H" - - result = dti.tz_localize("UTC") # unambiguous freq preservation - assert result.freq == "H" - - result = dti.tz_localize("US/Eastern", nonexistent="shift_forward") - assert result.freq is None - assert result.inferred_freq is None # i.e. 
we are not _too_ strict here - - # Case where we _can_ keep freq because we're length==1 - dti2 = dti[:1] - result = dti2.tz_localize("US/Eastern") - assert result.freq == "H" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/html.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/html.py deleted file mode 100644 index 27fb7534cd1818696cdfaa2bd9346a1fcd864fff..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/html.py +++ /dev/null @@ -1,623 +0,0 @@ -""" - pygments.lexers.html - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for HTML, XML and related markup. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \ - default, using -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Punctuation, Whitespace -from pygments.util import looks_like_xml, html_doctype_matches - -from pygments.lexers.javascript import JavascriptLexer -from pygments.lexers.jvm import ScalaLexer -from pygments.lexers.css import CssLexer, _indentation, _starts_block -from pygments.lexers.ruby import RubyLexer - -__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer', - 'ScamlLexer', 'PugLexer', 'UrlEncodedLexer'] - - -class HtmlLexer(RegexLexer): - """ - For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted - by the appropriate lexer. - """ - - name = 'HTML' - url = 'https://html.spec.whatwg.org/' - aliases = ['html'] - filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt'] - mimetypes = ['text/html', 'application/xhtml+xml'] - - flags = re.IGNORECASE | re.DOTALL - tokens = { - 'root': [ - ('[^<&]+', Text), - (r'&\S*?;', Name.Entity), - (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc), - (r'', Comment.Multiline), - (r'<\?.*?\?>', Comment.Preproc), - (']*>', Comment.Preproc), - (r'(<)(\s*)(script)(\s*)', - bygroups(Punctuation, Text, Name.Tag, Text), - ('script-content', 'tag')), - (r'(<)(\s*)(style)(\s*)', - bygroups(Punctuation, Text, Name.Tag, Text), - ('style-content', 'tag')), - # note: this allows tag names not used in HTML like , - # this is to support yet-unknown template engines and the like - (r'(<)(\s*)([\w:.-]+)', - bygroups(Punctuation, Text, Name.Tag), 'tag'), - (r'(<)(\s*)(/)(\s*)([\w:.-]+)(\s*)(>)', - bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text, - Punctuation)), - ], - 'tag': [ - (r'\s+', Text), - (r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text), - 'attr'), - (r'[\w:-]+', Name.Attribute), - (r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'), - ], - 'script-content': [ - (r'(<)(\s*)(/)(\s*)(script)(\s*)(>)', - bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text, - Punctuation), '#pop'), - (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)), - # fallback cases for when there is no closing script tag - # first look for newline and then go back into root state - # if that fails just read the rest of the file - # this is similar to the error handling logic in lexer.py - (r'.+?\n', using(JavascriptLexer), '#pop'), - (r'.+', using(JavascriptLexer), '#pop'), - ], - 'style-content': [ - (r'(<)(\s*)(/)(\s*)(style)(\s*)(>)', - bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text, - Punctuation),'#pop'), - (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)), - # fallback cases for when there is no 
closing style tag - # first look for newline and then go back into root state - # if that fails just read the rest of the file - # this is similar to the error handling logic in lexer.py - (r'.+?\n', using(CssLexer), '#pop'), - (r'.+', using(CssLexer), '#pop'), - ], - 'attr': [ - ('".*?"', String, '#pop'), - ("'.*?'", String, '#pop'), - (r'[^\s>]+', String, '#pop'), - ], - } - - def analyse_text(text): - if html_doctype_matches(text): - return 0.5 - - -class DtdLexer(RegexLexer): - """ - A lexer for DTDs (Document Type Definitions). - - .. versionadded:: 1.5 - """ - - flags = re.MULTILINE | re.DOTALL - - name = 'DTD' - aliases = ['dtd'] - filenames = ['*.dtd'] - mimetypes = ['application/xml-dtd'] - - tokens = { - 'root': [ - include('common'), - - (r'(\s]+)', - bygroups(Keyword, Text, Name.Tag)), - (r'PUBLIC|SYSTEM', Keyword.Constant), - (r'[\[\]>]', Keyword), - ], - - 'common': [ - (r'\s+', Text), - (r'(%|&)[^;]*;', Name.Entity), - ('', Comment, '#pop'), - ('-', Comment), - ], - - 'element': [ - include('common'), - (r'EMPTY|ANY|#PCDATA', Keyword.Constant), - (r'[^>\s|()?+*,]+', Name.Tag), - (r'>', Keyword, '#pop'), - ], - - 'attlist': [ - include('common'), - (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', - Keyword.Constant), - (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant), - (r'xml:space|xml:lang', Keyword.Reserved), - (r'[^>\s|()?+*,]+', Name.Attribute), - (r'>', Keyword, '#pop'), - ], - - 'entity': [ - include('common'), - (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant), - (r'[^>\s|()?+*,]+', Name.Entity), - (r'>', Keyword, '#pop'), - ], - - 'notation': [ - include('common'), - (r'SYSTEM|PUBLIC', Keyword.Constant), - (r'[^>\s|()?+*,]+', Name.Attribute), - (r'>', Keyword, '#pop'), - ], - } - - def analyse_text(text): - if not looks_like_xml(text) and \ - ('', Comment.Preproc), - (r'', Comment.Multiline), - (r'<\?.*?\?>', Comment.Preproc), - (']*>', Comment.Preproc), - (r'<\s*[\w:.-]+', Name.Tag, 'tag'), - (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag), - ], - 'tag': [ - (r'\s+', Whitespace), - (r'[\w.:-]+\s*=', Name.Attribute, 'attr'), - (r'/?\s*>', Name.Tag, '#pop'), - ], - 'attr': [ - (r'\s+', Whitespace), - ('".*?"', String, '#pop'), - ("'.*?'", String, '#pop'), - (r'[^\s>]+', String, '#pop'), - ], - } - - def analyse_text(text): - if looks_like_xml(text): - return 0.45 # less than HTML - - -class XsltLexer(XmlLexer): - """ - A lexer for XSLT. - - .. 
versionadded:: 0.10 - """ - - name = 'XSLT' - aliases = ['xslt'] - filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc - mimetypes = ['application/xsl+xml', 'application/xslt+xml'] - - EXTRA_KEYWORDS = { - 'apply-imports', 'apply-templates', 'attribute', - 'attribute-set', 'call-template', 'choose', 'comment', - 'copy', 'copy-of', 'decimal-format', 'element', 'fallback', - 'for-each', 'if', 'import', 'include', 'key', 'message', - 'namespace-alias', 'number', 'otherwise', 'output', 'param', - 'preserve-space', 'processing-instruction', 'sort', - 'strip-space', 'stylesheet', 'template', 'text', 'transform', - 'value-of', 'variable', 'when', 'with-param' - } - - def get_tokens_unprocessed(self, text): - for index, token, value in XmlLexer.get_tokens_unprocessed(self, text): - m = re.match(']*)/?>?', value) - - if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS: - yield index, Keyword, value - else: - yield index, token, value - - def analyse_text(text): - if looks_like_xml(text) and ']{1,2}(?=[ \t=])', Punctuation), - include('eval-or-plain'), - ], - - 'plain': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), - (r'(#\{)(' + _dot + r'*?)(\})', - bygroups(String.Interpol, using(RubyLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - - 'html-attributes': [ - (r'\s+', Text), - (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), - (r'[\w:-]+', Name.Attribute), - (r'\)', Text, '#pop'), - ], - - 'html-attribute-value': [ - (r'[ \t]+', Text), - (r'\w+', Name.Variable, '#pop'), - (r'@\w+', Name.Variable.Instance, '#pop'), - (r'\$\w+', Name.Variable.Global, '#pop'), - (r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'), - (r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'), - ], - - 'html-comment-block': [ - (_dot + '+', Comment), - (r'\n', Text, 'root'), - ], - - 'haml-comment-block': [ - (_dot + '+', Comment.Preproc), - (r'\n', Text, 'root'), - ], - - 'filter-block': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), - (r'(#\{)(' + _dot + r'*?)(\})', - bygroups(String.Interpol, using(RubyLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - } - - -class ScamlLexer(ExtendedRegexLexer): - """ - For `Scaml markup `_. Scaml is Haml for Scala. - - .. versionadded:: 1.4 - """ - - name = 'Scaml' - aliases = ['scaml'] - filenames = ['*.scaml'] - mimetypes = ['text/x-scaml'] - - flags = re.IGNORECASE - # Scaml does not yet support the " |\n" notation to - # wrap long lines. Once it does, use the custom faux - # dot instead. - # _dot = r'(?: \|\n(?=.* \|)|.)' - _dot = r'.' - - tokens = { - 'root': [ - (r'[ \t]*\n', Text), - (r'[ \t]*', _indentation), - ], - - 'css': [ - (r'\.[\w:-]+', Name.Class, 'tag'), - (r'\#[\w:-]+', Name.Function, 'tag'), - ], - - 'eval-or-plain': [ - (r'[&!]?==', Punctuation, 'plain'), - (r'([&!]?[=~])(' + _dot + r'*\n)', - bygroups(Punctuation, using(ScalaLexer)), - 'root'), - default('plain'), - ], - - 'content': [ - include('css'), - (r'%[\w:-]+', Name.Tag, 'tag'), - (r'!!!' 
+ _dot + r'*\n', Name.Namespace, '#pop'), - (r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)', - bygroups(Comment, Comment.Special, Comment), - '#pop'), - (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), - '#pop'), - (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, - 'scaml-comment-block'), '#pop'), - (r'(-@\s*)(import)?(' + _dot + r'*\n)', - bygroups(Punctuation, Keyword, using(ScalaLexer)), - '#pop'), - (r'(-)(' + _dot + r'*\n)', - bygroups(Punctuation, using(ScalaLexer)), - '#pop'), - (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), - '#pop'), - include('eval-or-plain'), - ], - - 'tag': [ - include('css'), - (r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)), - (r'\[' + _dot + r'*?\]', using(ScalaLexer)), - (r'\(', Text, 'html-attributes'), - (r'/[ \t]*\n', Punctuation, '#pop:2'), - (r'[<>]{1,2}(?=[ \t=])', Punctuation), - include('eval-or-plain'), - ], - - 'plain': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), - (r'(#\{)(' + _dot + r'*?)(\})', - bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - - 'html-attributes': [ - (r'\s+', Text), - (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), - (r'[\w:-]+', Name.Attribute), - (r'\)', Text, '#pop'), - ], - - 'html-attribute-value': [ - (r'[ \t]+', Text), - (r'\w+', Name.Variable, '#pop'), - (r'@\w+', Name.Variable.Instance, '#pop'), - (r'\$\w+', Name.Variable.Global, '#pop'), - (r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'), - (r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'), - ], - - 'html-comment-block': [ - (_dot + '+', Comment), - (r'\n', Text, 'root'), - ], - - 'scaml-comment-block': [ - (_dot + '+', Comment.Preproc), - (r'\n', Text, 'root'), - ], - - 'filter-block': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), - (r'(#\{)(' + _dot + r'*?)(\})', - bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - } - - -class PugLexer(ExtendedRegexLexer): - """ - For Pug markup. - Pug is a variant of Scaml, see: - http://scalate.fusesource.org/documentation/scaml-reference.html - - .. versionadded:: 1.4 - """ - - name = 'Pug' - aliases = ['pug', 'jade'] - filenames = ['*.pug', '*.jade'] - mimetypes = ['text/x-pug', 'text/x-jade'] - - flags = re.IGNORECASE - _dot = r'.' - - tokens = { - 'root': [ - (r'[ \t]*\n', Text), - (r'[ \t]*', _indentation), - ], - - 'css': [ - (r'\.[\w:-]+', Name.Class, 'tag'), - (r'\#[\w:-]+', Name.Function, 'tag'), - ], - - 'eval-or-plain': [ - (r'[&!]?==', Punctuation, 'plain'), - (r'([&!]?[=~])(' + _dot + r'*\n)', - bygroups(Punctuation, using(ScalaLexer)), 'root'), - default('plain'), - ], - - 'content': [ - include('css'), - (r'!!!' 
+ _dot + r'*\n', Name.Namespace, '#pop'), - (r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)', - bygroups(Comment, Comment.Special, Comment), - '#pop'), - (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), - '#pop'), - (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, - 'scaml-comment-block'), '#pop'), - (r'(-@\s*)(import)?(' + _dot + r'*\n)', - bygroups(Punctuation, Keyword, using(ScalaLexer)), - '#pop'), - (r'(-)(' + _dot + r'*\n)', - bygroups(Punctuation, using(ScalaLexer)), - '#pop'), - (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), - '#pop'), - (r'[\w:-]+', Name.Tag, 'tag'), - (r'\|', Text, 'eval-or-plain'), - ], - - 'tag': [ - include('css'), - (r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)), - (r'\[' + _dot + r'*?\]', using(ScalaLexer)), - (r'\(', Text, 'html-attributes'), - (r'/[ \t]*\n', Punctuation, '#pop:2'), - (r'[<>]{1,2}(?=[ \t=])', Punctuation), - include('eval-or-plain'), - ], - - 'plain': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), - (r'(#\{)(' + _dot + r'*?)(\})', - bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - - 'html-attributes': [ - (r'\s+', Text), - (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), - (r'[\w:-]+', Name.Attribute), - (r'\)', Text, '#pop'), - ], - - 'html-attribute-value': [ - (r'[ \t]+', Text), - (r'\w+', Name.Variable, '#pop'), - (r'@\w+', Name.Variable.Instance, '#pop'), - (r'\$\w+', Name.Variable.Global, '#pop'), - (r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'), - (r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'), - ], - - 'html-comment-block': [ - (_dot + '+', Comment), - (r'\n', Text, 'root'), - ], - - 'scaml-comment-block': [ - (_dot + '+', Comment.Preproc), - (r'\n', Text, 'root'), - ], - - 'filter-block': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), - (r'(#\{)(' + _dot + r'*?)(\})', - bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - } -JadeLexer = PugLexer # compat - - -class UrlEncodedLexer(RegexLexer): - """ - Lexer for urlencoded data - - .. versionadded:: 2.16 - """ - - name = 'urlencoded' - aliases = ['urlencoded'] - mimetypes = ['application/x-www-form-urlencoded'] - - tokens = { - 'root': [ - ('([^&=]*)(=)([^=&]*)(&?)', bygroups(Name.Tag, Operator, String, Punctuation)), - ], - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/mips.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/mips.py deleted file mode 100644 index 257605d7e5854e29588dd84cebe93f2747c4b393..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/mips.py +++ /dev/null @@ -1,128 +0,0 @@ -""" - pygments.lexers.mips - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for MIPS assembly. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, words -from pygments.token import Whitespace, Comment, String, Keyword, Name, Text - -__all__ = ["MIPSLexer"] - - -class MIPSLexer(RegexLexer): - """ - A MIPS Assembly Lexer. 
- - Based on the Emacs major mode by hlissner: - https://github.com/hlissner/emacs-mips-mode - """ - - name = 'MIPS' - aliases = ['mips'] - # TODO: add '*.s' and '*.asm', which will require designing an analyse_text - # method for this lexer and refactoring those from Gas and Nasm in order to - # have relatively reliable detection - filenames = ['*.mips', '*.MIPS'] - - keywords = [ - # Arithmetic insturctions - "add", "sub", "subu", "addi", "subi", "addu", "addiu", - # Multiplication/division - "mul", "mult", "multu", "mulu", "madd", "maddu", "msub", "msubu", "div", "divu", - # Bitwise operations - "and", "or", "nor", "xor", "andi", "ori", "xori", "clo", "clz", - # Shifts - "sll", "srl", "sllv", "srlv", "sra", "srav", - # Comparisons - "slt", "sltu", "slti", "sltiu", - # Move data - "mfhi", "mthi", "mflo", "mtlo", "movn", "movz", "movf", "movt", - # Jump - "j", "jal", "jalr", "jr", - # branch - "bc1f", "bc1t", "beq", "bgez", "bgezal", "bgtz", "blez", "bltzal", "bltz", "bne", - # Load - "lui", "lb", "lbu", "lh", "lhu", "lw", "lwcl", "lwl", "lwr", - # Store - "sb", "sh", "sw", "swl", "swr", # coproc: swc1 sdc1 - # Concurrent load/store - "ll", "sc", - # Trap handling - "teq", "teqi", "tne", "tneqi", "tge", "tgeu", "tgei", "tgeiu", "tlt", "tltu", "tlti", - "tltiu", - # Exception / Interrupt - "eret", "break", "bop", "syscall", - # --- Floats ----------------------------------------------------- - # Arithmetic - "add.s", "add.d", "sub.s", "sub.d", "mul.s", "mul.d", "div.s", "div.d", "neg.d", - "neg.s", - # Comparison - "c.e.d", "c.e.s", "c.le.d", "c.le.s", "c.lt.s", "c.lt.d", # "c.gt.s", "c.gt.d", - "madd.s", "madd.d", "msub.s", "msub.d", - # Move Floats - "mov.d", "move.s", "movf.d", "movf.s", "movt.d", "movt.s", "movn.d", "movn.s", - "movnzd", "movz.s", "movz.d", - # Conversion - "cvt.d.s", "cvt.d.w", "cvt.s.d", "cvt.s.w", "cvt.w.d", "cvt.w.s", "trunc.w.d", - "trunc.w.s", - # Math - "abs.s", "abs.d", "sqrt.s", "sqrt.d", "ceil.w.d", "ceil.w.s", "floor.w.d", - "floor.w.s", "round.w.d", "round.w.s", - ] - - pseudoinstructions = [ - # Arithmetic & logical - "rem", "remu", "mulo", "mulou", "abs", "neg", "negu", "not", "rol", "ror", - # branches - "b", "beqz", "bge", "bgeu", "bgt", "bgtu", "ble", "bleu", "blt", "bltu", "bnez", - # loads - "la", "li", "ld", "ulh", "ulhu", "ulw", - # Store - "sd", "ush", "usw", - # move - "move", # coproc: "mfc1.d", - # comparisons - "sgt", "sgtu", "sge", "sgeu", "sle", "sleu", "sne", "seq", - # --- Floats ----------------------------------------------------- - # load-store - "l.d", "l.s", "s.d", "s.s", - ] - - directives = [ - ".align", ".ascii", ".asciiz", ".byte", ".data", ".double", ".extern", ".float", - ".globl", ".half", ".kdata", ".ktext", ".space", ".text", ".word", - ] - - deprecated = [ - "beql", "bnel", "bgtzl", "bgezl", "bltzl", "blezl", "bltzall", "bgezall", - ] - - tokens = { - 'root': [ - (r'\s+', Whitespace), - (r'#.*', Comment), - (r'"', String, 'string'), - (r'-?[0-9]+?', Keyword.Constant), - (r'\w*:', Name.Function), - (words(deprecated, suffix=r'\b'), Keyword.Pseudo), # need warning face - (words(pseudoinstructions, suffix=r'\b'), Name.Variable), - (words(keywords, suffix=r'\b'), Keyword), - (r'[slm][ftwd]c[0-9]([.]d)?', Keyword), - (r'\$(f?[0-2][0-9]|f?3[01]|[ft]?[0-9]|[vk][01]|a[0-3]|s[0-7]|[gsf]p|ra|at|zero)', - Keyword.Type), - (words(directives, suffix=r'\b'), Name.Entity), # Preprocessor? 
- (r':|,|;|\{|\}|=>|@|\$|=', Name.Builtin), - (r'\w+', Text), - (r'.', Text), - ], - 'string': [ - (r'\\.', String.Escape), - (r'"', String, '#pop'), - (r'[^\\"]+', String), - ], - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tzdata/zoneinfo/Brazil/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tzdata/zoneinfo/Brazil/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/qiantong-xu/sambanovasystems-codegen-16B-mono-toolbench/README.md b/spaces/qiantong-xu/sambanovasystems-codegen-16B-mono-toolbench/README.md deleted file mode 100644 index e24833eed9838f89f6f83132a55c08497df6ee22..0000000000000000000000000000000000000000 --- a/spaces/qiantong-xu/sambanovasystems-codegen-16B-mono-toolbench/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sambanovasystems Codegen 16B Mono Toolbench -emoji: 🔥 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false -license: bsd-3-clause ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Bmw Inpa 720 Torrent.md b/spaces/quidiaMuxgu/Expedit-SAM/Bmw Inpa 720 Torrent.md deleted file mode 100644 index 5b86c6e3e69c5d4ecb3bb6025cd71f2fe3cb4520..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Bmw Inpa 720 Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Bmw Inpa 720 Torrent


            Downloadhttps://geags.com/2uCsy4



            -
            -BMW INPA Ediabas NCS Expert tool WinKFP 2012.torrent. I haven't ... AliciaKeys Baloise Session 2017 HDTV x264-LiNKLE[TGx], 421 MB, 21.10.2019, 0, 0. 1fdad05405
            -
            -
            -

            diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Documalis Free Pdf Scanner Serial.md b/spaces/quidiaMuxgu/Expedit-SAM/Documalis Free Pdf Scanner Serial.md deleted file mode 100644 index 4498c9ad3d6b853e24c01b0c5a20ec8ab198c5e5..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Documalis Free Pdf Scanner Serial.md +++ /dev/null @@ -1,45 +0,0 @@ -
            -

            Documalis Free PDF Scanner: A Review

            -

            Documalis Free PDF Scanner is a software that allows you to scan documents and save them as PDF files. It is easy to use and has some useful features, such as automatic document detection, image enhancement, OCR, and batch scanning. However, it also has some drawbacks, such as limited output formats, watermarking, and lack of updates. In this article, we will review the pros and cons of Documalis Free PDF Scanner and compare it with some alternatives.

            -

            Pros of Documalis Free PDF Scanner

            -
              -
            • It is free to download and use for personal and non-commercial purposes.
            • -
            • It has a simple and intuitive interface that guides you through the scanning process.
            • -
            • It supports various scanners and cameras, including TWAIN and WIA devices.
            • -
            • It can automatically detect the document size and orientation, and crop the image accordingly.
            • -
            • It can enhance the image quality by adjusting the brightness, contrast, color, and resolution.
            • -
            • It can perform OCR (optical character recognition) on the scanned documents and extract the text content.
            • -
            • It can scan multiple documents at once and save them as a single PDF file or separate files.
            • -
            • It can compress the PDF files to reduce their size and optimize them for web viewing.
            • -
            -

            Cons of Documalis Free PDF Scanner

            -
              -
            • It only supports PDF as the output format. You cannot save the scanned documents as images or other formats.
            • -
            • It adds a watermark to the bottom of each page of the PDF file, which may be annoying or unprofessional.
            • -
            • It has not been updated since 2014, so it may not be compatible with newer operating systems or devices.
            • -
            • It does not have advanced features such as editing, annotating, signing, encrypting, or merging PDF files.
            • -
            • It may contain adware or malware that may harm your computer or privacy.
            • -
            -

            Alternatives to Documalis Free PDF Scanner

            -

            If you are looking for a better or more reliable PDF scanner software, you may want to consider some of these alternatives:

            -

            documalis free pdf scanner serial


            Download 🆓 https://geags.com/2uCrD9



            -
              -
            • NAPS2: A free and open-source software that supports scanning to PDF, TIFF, JPEG, PNG, and other formats. It also has OCR, image enhancement, editing, encryption, and other features.
            • -
            • ABBYY FineReader: A professional software that offers high-quality scanning and OCR. It also has editing, annotating, converting, comparing, and collaborating features. It is not free but offers a trial version.
            • -
            • Adobe Acrobat: A popular software that allows you to create, edit, sign, and share PDF files. It also has scanning, OCR, compression, encryption, and other features. It is not free but offers a subscription plan.
            • -
            - -

            How to Use Documalis Free PDF Scanner

            -

            To use Documalis Free PDF Scanner, you need to download and install it from its official website. Then, you need to connect your scanner or camera to your computer and launch the software. You will see a window with four tabs: Scan, Enhance, OCR, and Save. You can follow these steps to scan your documents:

            -
              -
            1. Click on the Scan tab and select your scanner or camera from the drop-down menu. You can also adjust the scan settings, such as resolution, color mode, and duplex mode.
            2. -
            3. Place your document on the scanner or hold your camera over it and click on the Scan button. You will see a preview of the scanned image on the right panel. You can crop, rotate, or delete the image if needed.
            4. -
            5. Click on the Enhance tab and use the sliders to adjust the brightness, contrast, color, and resolution of the image. You can also apply filters, such as grayscale, sepia, or negative.
            6. -
            7. Click on the OCR tab and choose the language and output format of the text. You can also edit or correct the text if needed.
            8. -
            9. Click on the Save tab and choose the destination folder and file name for your PDF file. You can also compress or optimize the file for web viewing.
            10. -
            11. Click on the Save button and wait for the software to create your PDF file. You can then open or share it as you wish.
            12. -
            -

            Conclusion

            -

            Documalis Free PDF Scanner is a software that allows you to scan documents and save them as PDF files. It has some useful features, such as automatic document detection, image enhancement, OCR, and batch scanning. However, it also has some drawbacks, such as limited output formats, watermarking, and lack of updates. If you are looking for a better or more reliable PDF scanner software, you may want to consider some of the alternatives we mentioned above.

            d5da3c52bf
            -
            -
            \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Download Filmes Pornos De Zoofilia Torrent.md b/spaces/quidiaMuxgu/Expedit-SAM/Download Filmes Pornos De Zoofilia Torrent.md deleted file mode 100644 index 07a91d184beb7f0917096a82799cd7bf8c184bcd..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Download Filmes Pornos De Zoofilia Torrent.md +++ /dev/null @@ -1,5 +0,0 @@ -
            -

            Some of the best German shepherds for sale at Bargains15.com. The best free filters and maximum speed for android! Best rated Tablets for Android! Download Blackberry now! Best deals for android: Blackberry 5 & Blackberry Curve 8430. Blackberry Playbook 1 & 3.00 Blackberry Playbook 2 8.00 Blackberry Playbook 4 8.00 Blackberry Playbook 5.00 Blackberry Playbook 6.00 Blackberry Playbook 2 8.00 Blackberry Playbook 3.00 Blackberry Playbook 7.00 Blackberry Playbook 8.00 Blackberry Playbook 5.00 Blackberry Playbook 8.00 Blackberry Playbook 2.00 Blackberry Playbook 3.00 Blackberry Playbook 7.00 Blackberry Playbook 12.00 Blackberry Playbook 2 8.00 Blackberry Playbook 3 8.00 Blackberry Playbook 4 8.00 Blackberry Playbook 5.00 Blackberry Playbook 6.00 Blackberry Playbook 7.00 Blackberry Playbook 7.00 Blackberry Playbook 8.00 Blackberry Playbook 8.00 Blackberry Playbook 9.00 Blackberry Playbook 2 8.00 Blackberry Playbook 3.00 Blackberry Playbook 11.00 Blackberry Playbook 4 8.00 Blackberry Playbook 5.00 Blackberry Playbook 5.00 Blackberry Playbook 6.00 Blackberry Playbook 7.00 Blackberry Playbook 7.00 Blackberry Playbook 9.00 Blackberry Playbook 2 8.00 Blackberry Playbook 3 8.00 Blackberry Playbook 3.00 Blackberry Playbook 4 8.00 Blackberry Playbook 8.00 Blackberry Playbook 5.00 Blackberry Playbook 7.00 Blackberry Playbook 7.00 Blackberry Playbook 8.00 Blackberry Playbook 10.00 Blackberry Playbook 2 8.00 Blackberry Playbook 3 8.00 Blackberry Playbook 3.00 Blackberry Playbook 3.00 Blackberry Playbook 5.00 Blackberry Playbook 6.00 Blackberry Playbook 7.00 Blackberry Playbook 7.00 Blackberry Playbook 10.00 Blackberry Playbook 8.00 Blackberry Playbook 9.00 Blackberry Playbook 11.00 Blackberry Playbook 12.00 Blackberry Playbook 2 8.00 Blackberry Playbook 3.00 Blackberry Playbook 4.00 Blackberry Playbook 4.00 Blackberry Playbook 5.00 Blackberry Playbook 5.00 Blackberry Playbook 5.00 Blackberry Playbook 5.00 Blackberry Playbook 6.00 Blackberry Playbook 6.00 Blackberry Playbook 6.00 Blackberry Playbook 7.00 Blackberry Playbook 7.00 Blackberry Playbook 8.00 Blackberry Playbook 8.00 Blackberry Playbook 9.00 Blackberry Playbook 9.00 Blackberry Playbook 9.00 Blackberry Playbook 10.00 Blackberry Playbook 10.00 Blackberry Playbook 10.00 Blackberry Playbook 11.00 Blackberry Playbook 11.00 Blackberry Playbook 11.00 Blackberry Playbook 12.00 Blackberry Playbook 12.00 Blackberry Playbook 12.00 Blackberry Playbook 12.00 Blackberry Playbook 12.00 Blackberry Playbook 12.00 Blackberry Playbook 13.00 Blackberry Playbook 14.00 Blackberry Playbook 15.00 Blackberry Playbook 2.00 Blackberry Playbook 4.00 Blackberry Playbook 4.00 Blackberry Playbook 5.00 Blackberry Playbook 5.00 Blackberry Playbook 6.00 Blackberry Playbook 6.00 Blackberry Playbook 7.

            -

            Download Filmes Pornos De Zoofilia Torrent


            Download 🆓 https://geags.com/2uCsw3



            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Download Lebanon Car Directory For Pc.md b/spaces/quidiaMuxgu/Expedit-SAM/Download Lebanon Car Directory For Pc.md deleted file mode 100644 index 16a856ed15353d7d637f48f68582b1472ca2ca2f..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Download Lebanon Car Directory For Pc.md +++ /dev/null @@ -1,98 +0,0 @@ -
            -

            Download Lebanon Car Directory For PC: A Useful Tool for Car Owners and Buyers

            - -

            If you are looking for a way to find out more information about any car in Lebanon, you might want to download Lebanon Car Directory for PC. This is an app that allows you to enter the car's license plate number and get all the details related to the car and its owner. You can also check the mechanic due dates, fines, and taxes for any car.

            -

            Download Lebanon Car Directory For Pc


            DOWNLOAD --->>> https://geags.com/2uCpZE



            - -

            Downloading Lebanon Car Directory for PC is easy and free. You just need to follow these simple steps:

            - -
              -
            1. Download an Android emulator on your PC. An emulator is a software that lets you run Android apps on your computer. There are many emulators available online, such as Bluestacks, Nox, or MEmu.
            2. -
            3. Install the emulator on your PC and launch it.
            4. -
            5. Open the Google Play Store app on the emulator and search for "Car Details Lebanon".
            6. -
            7. Select the app from the search results and click on "Install".
            8. -
            9. Wait for the app to download and install on your PC.
            10. -
            11. Open the app and enter the license plate number of any car in Lebanon.
            12. -
            13. Enjoy the app and its features.
            14. -
            - -

            Why Download Lebanon Car Directory For PC?

            - -

            There are many reasons why you might want to download Lebanon Car Directory for PC. Here are some of them:

            - -
              -
            • You can find out who owns any car in Lebanon and contact them if they are blocking your parking spot or if you are interested in buying their car.
            • -
            • You can check the history of any car in Lebanon and avoid buying a stolen or damaged car.
            • -
            • You can keep track of your own car's maintenance and payments and avoid fines and penalties.
            • -
            • You can access the app from your PC anytime and anywhere without using your mobile data or battery.
            • -
            • You can enjoy a larger screen and better graphics than on your mobile device.
            • -
            - -

            Download Lebanon Car Directory For PC Today

            - -

            Downloading Lebanon Car Directory for PC is a smart move if you want to have more information and control over any car in Lebanon. This app is easy to use, reliable, and updated. You can download it for free from the Google Play Store using an Android emulator on your PC. Don't miss this opportunity and download Lebanon Car Directory for PC today.

            -

            What are the Features of Lebanon Car Directory For PC?

            - -

            Lebanon Car Directory for PC is not just a simple app that shows you the car owner's name and phone number. It also has many other features that make it a useful tool for car owners and buyers. Here are some of them:

            - -
              -
            • You can search for any car in Lebanon by plate number, VIN number, or chassis number.
            • -
            • You can view the car's model, year, color, engine size, fuel type, and registration date.
            • -
            • You can see the car's mechanic due dates for inspection, insurance, and road tax.
            • -
            • You can check if the car has any fines or penalties from the traffic police or the municipality.
            • -
            • You can see if the car has any loans or mortgages from banks or financial institutions.
            • -
            • You can see if the car has been involved in any accidents or damages.
            • -
            • You can see if the car has been reported stolen or missing.
            • -
            • You can see if the car has any modifications or alterations that affect its performance or safety.
            • -
            - -
            How to Use Lebanon Car Directory For PC?
            - -

            Using Lebanon Car Directory for PC is very easy and intuitive. You just need to follow these simple steps:

            -

            - -
              -
            1. Open the app on your PC and enter the license plate number of the car you want to search for.
            2. -
            3. Wait for a few seconds while the app retrieves the information from its database.
            4. -
            5. View the results on your screen and scroll down to see more details.
            6. -
            7. If you want to contact the car owner, you can click on the phone icon and call them directly from the app.
            8. -
            9. If you want to share the results with someone else, you can click on the share icon and choose your preferred method of sharing.
            10. -
            - -
            Download Lebanon Car Directory For PC: A Must-Have App for Car Lovers
            - -

            If you are a car lover, you will definitely appreciate downloading Lebanon Car Directory for PC. This app will help you find out everything you need to know about any car in Lebanon. You will also be able to contact the car owner if you have any questions or offers. You will also be able to avoid buying a bad car or getting into trouble with the law. Downloading Lebanon Car Directory for PC is a smart decision that will save you time, money, and hassle. Don't wait any longer and download Lebanon Car Directory for PC today.

            -What are the Advantages of Lebanon Car Directory For PC? - -

            Lebanon Car Directory for PC is not just a handy app for car owners and buyers. It also has many advantages that make it a valuable tool for anyone who lives or works in Lebanon. Here are some of them:

            - -
              -
            • You can save time and money by avoiding unnecessary trips to the mechanic, the traffic police, or the bank. You can check everything online from your PC.
            • -
            • You can protect yourself and your family from fraud, theft, or violence. You can verify the identity and background of any car owner before dealing with them.
            • -
            • You can support the environment and the economy by choosing cars that are eco-friendly and tax-efficient. You can compare different cars and their features online.
            • -
            • You can contribute to the improvement of the public services and infrastructure in Lebanon. You can report any errors or discrepancies in the app's database to the developers.
            • -
            • You can enjoy a user-friendly and secure app that respects your privacy and data. You can use the app without any ads or subscriptions.
            • -
            - -How to Download Lebanon Car Directory For PC? - -

            If you are convinced by the benefits of downloading Lebanon Car Directory for PC, you might be wondering how to do it. Well, it's very simple and fast. You just need to follow these easy steps:

            - -
              -
            1. Click on this link to download an Android emulator on your PC. An emulator is a software that lets you run Android apps on your computer.
            2. -
            3. Install the emulator on your PC and launch it.
            4. -
            5. Open the Google Play Store app on the emulator and search for "Car Details Lebanon".
            6. -
            7. Select the app from the search results and click on "Install".
            8. -
            9. Wait for the app to download and install on your PC.
            10. -
            11. Open the app and enter the license plate number of any car in Lebanon.
            12. -
            13. Enjoy the app and its features.
            14. -
            - -Conclusion - -

            Downloading Lebanon Car Directory for PC is a smart decision that will make your life easier and safer. This app will help you find out everything you need to know about any car in Lebanon. You will also be able to contact the car owner if you have any questions or offers. You will also be able to avoid buying a bad car or getting into trouble with the law. Downloading Lebanon Car Directory for PC is easy and free. You just need to use an Android emulator on your PC and follow a few simple steps. Don't miss this opportunity and download Lebanon Car Directory for PC today.

            -Conclusion - -

            Downloading Lebanon Car Directory for PC is a smart decision that will make your life easier and safer. This app will help you find out everything you need to know about any car in Lebanon. You will also be able to contact the car owner if you have any questions or offers. You will also be able to avoid buying a bad car or getting into trouble with the law. Downloading Lebanon Car Directory for PC is easy and free. You just need to use an Android emulator on your PC and follow a few simple steps. Don't miss this opportunity and download Lebanon Car Directory for PC today.

            3cee63e6c2
            -
            -
            \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (video Comparer 1 06 Keygen Free).md b/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (video Comparer 1 06 Keygen Free).md deleted file mode 100644 index 57da6101707946069c23e9141d17b86b9b992942..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (video Comparer 1 06 Keygen Free).md +++ /dev/null @@ -1,32 +0,0 @@ -

            HD Online Player (video comparer 1 06 keygen free)


            Download File > https://geags.com/2uCrW2



            - - . . “uname -a” Linux Ubuntu 14.04.3 . . . “cat /etc/issue” Ubuntu GNU/Linux.. . . . - -While this is not a complete, or even working (yet) method to compare Ubuntu, I do see some similarities in the output of “cat /etc/issue” and “uname -a”. Unfortunately, I have yet to find a working method to map the output of the two commands back to the correct name of the Ubuntu version that the laptop is running.  - -I am wondering, have any of you ever tried to map the output of “uname -a” and “cat /etc/issue” so that I can at least determine which version of Ubuntu my laptop is running? - -A: - -Try this command. - -$ lsb_release -a - -Output: - -LSB Version: 10.0 - -Distributor ID: Debian - -Description: Debian GNU/Linux 10 (buster) - -Release: 10 - -Codename: buster - -By CCN.com: The Federal Reserve will raise interest rates for the third time this year in September. The timing is largely beside the point. What matters is that the organization will eventually raise rates. - -The Fed will raise rates to hold their “target” rate at 2.25% to 2.5%. Once again, that’s the one-year rate. For the second time in history, the Fed will announce the increase just 4fefd39f24
            -
            -
            -

            diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Introducao A Administracao Maximiano.pdf.md b/spaces/quidiaMuxgu/Expedit-SAM/Introducao A Administracao Maximiano.pdf.md deleted file mode 100644 index 38e54f71e8a4b7d1d083fd617f242efca3e2c816..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Introducao A Administracao Maximiano.pdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Introducao A Administracao Maximiano.pdf


            Download Filehttps://geags.com/2uCqBk



            - -... AMARU MAXIMIANO PDF - Title Slide of Introdução à administração ... Gestão pública: abordagem integrada da administração e do direito ... 1fdad05405
            -
            -
            -

            diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Kirk Franklin Songs For The Storm Vol. 1 Full Album Zip.md b/spaces/quidiaMuxgu/Expedit-SAM/Kirk Franklin Songs For The Storm Vol. 1 Full Album Zip.md deleted file mode 100644 index cc6ea671f83d22edc68134e5cc440293d00d33bf..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Kirk Franklin Songs For The Storm Vol. 1 Full Album Zip.md +++ /dev/null @@ -1,46 +0,0 @@ -

            Kirk Franklin, Songs for the Storm, Vol. 1 full album zip


            Download Ziphttps://geags.com/2uCrxV



            - -5. I'll Be Okay - -6. Right Now - -7. That's My Name - -8. I Can't Wait - -9. Smiling Through It All - -10. I'm Not Afraid - -11. When It Hurts So Bad - -12. You'll Take the Pain Away - -13. Oh, My God - -14. Leave My World Alone - -15. D-R-U-N-K - -16. If Ever I Was - -17. I've Got To Change My Ways - -18. Tears in the Rain - -19. Promise Me You'll Be There - -20. You'll Take The Pain Away (Reprise) - -21. Finally - -More Hits From Kirk Franklin - -"When I was in high school, I heard a song on the radio called I'll Take The Pain Away. I wrote the words down and I've been singing them to myself for over ten years. They never fail to remind me of the ultimate truth: 'I am forgiven and I am loved.' This song is for me, my friends, and the millions who have heard the song."The present invention relates to a two-stroke engine provided with an auxiliary combustion chamber. - -A two-stroke engine is well known in the art. Such a two-stroke engine includes an engine cylinder having a wall which is defined by a crankcase and a cylinder head. A plurality of cylinders are formed in the wall. The cylinder has a piston slidably fitted in the cylinder. The cylinder head is attached to the wall of the crankcase. The cylinder head has a plurality of intake ports and exhaust ports. The intake ports and exhaust ports are disposed opposite to the respective cylinders of the cylinder head. The engine further includes a piston crank mechanism including a piston crank shaft which rotates in synchronism with the rotation of the crankshaft of the engine. The piston crank shaft has a crank chamber formed therein. The piston is fitted on the piston crank shaft. The crank chamber has an outlet port for discharging an exhaust gas to the outside of the crankcase. - -The two-stroke engine is provided with a crankcase scavenging type ignition system. In the crankcase scavenging type ignition system, the exhaust gas is admitted into the crankcase and then discharged therefrom through the outlet port. In the two-stroke engine, the exhaust gas must be discharged through the outlet port without delay. The exhaust gas is passed through the crankcase in an effort to discharge it through the outlet port without delay. The pressure in 4fefd39f24
            -
            -
            -

            diff --git a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/__init__.py b/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/models/stylegan2/op/__init__.py b/spaces/radames/UserControllableLT-Latent-Transformer/models/stylegan2/op/__init__.py deleted file mode 100644 index d0918d92285955855be89f00096b888ee5597ce3..0000000000000000000000000000000000000000 --- a/spaces/radames/UserControllableLT-Latent-Transformer/models/stylegan2/op/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .fused_act import FusedLeakyReLU, fused_leaky_relu -from .upfirdn2d import upfirdn2d diff --git a/spaces/raedeXanto/academic-chatgpt-beta/AVG PC Tuneup 2019 19.3.1402.209 Incl Crack-2019 keygen The Ultimate Solution for PC Maintenance and Repair.md b/spaces/raedeXanto/academic-chatgpt-beta/AVG PC Tuneup 2019 19.3.1402.209 Incl Crack-2019 keygen The Ultimate Solution for PC Maintenance and Repair.md deleted file mode 100644 index 7aefc759482c9df11bb33686748d5fab0d46473e..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/AVG PC Tuneup 2019 19.3.1402.209 Incl Crack-2019 keygen The Ultimate Solution for PC Maintenance and Repair.md +++ /dev/null @@ -1,131 +0,0 @@ -
            -

            AVG PC Tuneup 2019: A Comprehensive Review

            -

            If you are looking for a way to improve your PC's performance, speed, and stability, you may have heard of AVG PC Tuneup 2019. This is a popular software that claims to optimize your PC with over 40 tools and features. But is it really worth it? In this article, we will review AVG PC Tuneup 2019 in detail and help you decide if it is the right choice for you.

            -

            What is AVG PC Tuneup 2019 and why do you need it?

            -

            AVG PC Tuneup 2019 is a software that helps you clean, fix, and optimize your PC. It can help you solve common problems such as slow startup, crashes, freezes, errors, disk space issues, registry issues, battery drain, and more. It can also help you enhance your PC's performance by removing junk files, disabling unnecessary programs, updating drivers, defragmenting disks, and more. It can also help you protect your privacy and security by deleting sensitive files, updating outdated software, and more.

            -

            AVG PC Tuneup 2019 19.3.1402.209 Incl Crack-2019 keygen


            Download Zip ===> https://tinourl.com/2uKZ86



            -

            Why do you need AVG PC Tuneup 2019? Well, if you use your PC regularly, you may notice that it becomes slower and less reliable over time. This is because your PC accumulates a lot of clutter and errors that affect its performance. Also, your PC may be exposed to various threats such as malware, hackers, phishing, etc. that can compromise your privacy and security. AVG PC Tuneup 2019 can help you prevent these problems and keep your PC running smoothly and safely.

            -

            The main features of AVG PC Tuneup 2019

            -

            AVG PC Tuneup 2019 has over 40 tools and features that can help you improve your PC in various ways. Here are some of the main ones:

            -

            Speed up your PC with one-click optimization

            -

            One of the easiest ways to boost your PC's speed is to use the one-click optimization feature of AVG PC Tuneup 2019. This feature scans your PC for issues that slow it down and fixes them automatically. It can also optimize your settings for better performance. You can use this feature anytime you feel your PC is sluggish or needs a tune-up.

            -

            Clean up your disk space and registry with advanced tools

            -

            Another way to improve your PC's performance is to clean up your disk space and registry. These are two areas where a lot of junk files and errors accumulate over time. AVG PC Tuneup 2019 has advanced tools that can help you clean up these areas effectively. For example, you can use the Disk Cleaner to remove temporary files, cache files, log files, etc. that take up valuable space on your hard drive. You can also use the Registry Cleaner to fix invalid entries, broken links, missing references, etc. that cause errors and crashes on your system.

            -

            Boost your battery life and performance with economy mode

            -

            If you use a laptop or a tablet, you may want to save battery life while still enjoying good performance. AVG PC Tuneup 2019 has a feature called Economy Mode that can help you do that. This feature reduces the power consumption of your device by disabling unnecessary processes, services, devices, etc. that drain your battery. It also optimizes your settings for better energy efficiency. You can use this feature when you are on the go or when you want to extend your battery life.

            -

            Protect your privacy and security with file shredder and software updater

            -

            Besides improving your PC's performance, AVG PC Tuneup 2019 can also help you protect your privacy and security. One of the features that can help you do that is the File Shredder. This feature allows you to permanently delete sensitive files from your hard drive so that no one can recover them. This is useful when you want to dispose of old documents, photos, videos, etc. that contain personal or confidential information. Another feature that can help you do that is the Software Updater. This feature checks for outdated software on your PC and updates them automatically. This is important because outdated software can have security vulnerabilities that hackers can exploit.

            -

            How to install and activate AVG PC Tuneup 2019 with crack and keygen

            -

            If you are interested in trying out AVG PC Tuneup 2019 for free, you can download it from the official website or from the link below. However, if you want to use it without any limitations or restrictions, you will need to activate it with a crack and a keygen. Here are the steps to do that:

            -

            Download the setup file and crack from the link below

            -

            The first step is to download the setup file and crack from the link below. The setup file is about 60 MB in size and the crack is about 10 MB in size. You will need to unzip them before running them.

            -

            AVG PC Tuneup 2019 full version with crack and keygen
            -How to activate AVG PC Tuneup 2019 for free with crack
            -AVG PC Tuneup 2019 19.3.1402.209 serial key generator
            -Download AVG PC Tuneup 2019 cracked version
            -AVG PC Tuneup 2019 license key and patch
            -AVG PC Tuneup 2019 crack download for windows 10
            -AVG PC Tuneup 2019 keygen and activation code
            -AVG PC Tuneup 2019 crack only download
            -AVG PC Tuneup 2019 torrent with crack and keygen
            -AVG PC Tuneup 2019 review and features
            -AVG PC Tuneup 2019 system requirements and installation guide
            -AVG PC Tuneup 2019 latest update and changelog
            -AVG PC Tuneup 2019 best settings and tips
            -AVG PC Tuneup 2019 comparison with other optimization software
            -AVG PC Tuneup 2019 discount and coupon code
            -AVG PC Tuneup 2019 free trial and download link
            -AVG PC Tuneup 2019 alternative and similar software
            -AVG PC Tuneup 2019 support and customer service
            -AVG PC Tuneup 2019 crack fix and troubleshooting
            -AVG PC Tuneup 2019 online activation and registration
            -AVG PC Tuneup 2019 lifetime license and crack
            -AVG PC Tuneup 2019 portable and standalone version
            -AVG PC Tuneup 2019 multilingual and language pack
            -AVG PC Tuneup 2019 compatibility and performance issues
            -AVG PC Tuneup 2019 benefits and advantages
            -AVG PC Tuneup 2019 drawbacks and disadvantages
            -AVG PC Tuneup 2019 testimonials and feedback
            -AVG PC Tuneup 2019 pros and cons
            -AVG PC Tuneup 2019 crack safe and virus free download
            -AVG PC Tuneup 2019 crack working and verified download
            -How to uninstall AVG PC Tuneup 2019 completely with crack
            -How to upgrade to AVG PC Tuneup 2020 with crack
            -How to downgrade to AVG PC Tuneup 2018 with crack
            -How to transfer AVG PC Tuneup 2019 license to another computer with crack
            -How to backup and restore AVG PC Tuneup 2019 settings with crack
            -How to use AVG PC Tuneup 2019 offline mode with crack
            -How to customize AVG PC Tuneup 2019 interface with crack
            -How to schedule AVG PC Tuneup 2019 scans and tasks with crack
            -How to optimize your computer with AVG PC Tuneup 2019 with crack
            -How to clean your registry with AVG PC Tuneup 2019 with crack
            -How to defrag your disk with AVG PC Tuneup 2019 with crack
            -How to remove junk files with AVG PC Tuneup 2019 with crack
            -How to boost your startup speed with AVG PC Tuneup 2019 with crack
            -How to fix errors and crashes with AVG PC Tuneup 2019 with crack
            -How to improve your battery life with AVG PC Tuneup 2019 with crack
            -How to update your drivers with AVG PC Tuneup 2019 with crack
            -How to protect your privacy with AVG PC Tuneup 2019 with crack
            -How to recover deleted files with AVG PC Tuneup 2019 with crack
            -How to monitor your system health with AVG PC Tuneup 2019 with crack

            -

            Run the setup file and follow the instructions

            -

            The next step is to run the setup file and follow the instructions on the screen. You will need to accept the terms and conditions, choose a destination folder, create a shortcut icon, etc. The installation process will take a few minutes.

            -

            Copy the crack file to the installation folder

            -

            The third step is to copy the crack file to the installation folder of AVG PC Tuneup 2019. The installation folder is usually located at C:\Program Files (x86)\AVG\AVG TuneUp or C:\Program Files\AVG\AVG TuneUp depending on your system architecture. You will need to replace the original file with the cracked one.

            -

            Run the keygen and generate a license key

            -

            The final step is to run the keygen and generate a license key for AVG PC Tuneup 2019. The keygen is a small program that can create random keys for various software products. You will need to open it and click on Generate button until you get a valid key for AVG PC Tuneup 2019.

            -

            Enter the license key in the program and activate it

            -

            The last step is to enter the license key in the program and activate it. You will need to open AVG PC Tuneup 2019 and go to Menu > About > Activate Product. You will need to enter the license key in the field provided and click on Activate button. You will see a confirmation message that says "Your product has been successfully activated". You can now enjoy all the features of AVG PC Tuneup 2019 without any limitations or restrictions.

            -

            The pros and cons of AVG PC Tuneup 2019

            -

            Like any other software product, AVG PC Tuneup 2019 has its advantages and disadvantages. Here are some of them:

            -

            The pros: easy to use, effective, affordable, compatible, reliable

            -
              -
            • Easy to use: AVG PC Tuneup 2019 has a user-friendly interface that makes it easy to navigate and use its features.
            • -
            • Effective: AVG PC Tuneup 2019 has proven results in improving PC's performance, speed, stability, and security.
            • -
            • Affordable: AVG PC Tuneup 2019 has a reasonable price compared to other similar products on the market.
            • -
            • Compatible: AVG PC Tuneup 2019 works well with Windows XP/Vista/7/8/8.1/10 (32-bit or 64-bit) systems.
            • -
            • Reliable: AVG PC Tuneup 2019 has a good reputation among users and experts for its quality and reliability.
            • -
            -

            The cons: may slow down some processes,

            The cons: may slow down some processes, may delete some useful files, may cause some compatibility issues with other programs

            -
              -
            • May slow down some processes: AVG PC Tuneup 2019 may sometimes slow down some processes on your PC, such as boot time, shutdown time, or internet speed. This is because it performs a lot of tasks in the background that consume resources.
            • -
            • May delete some useful files: AVG PC Tuneup 2019 may sometimes delete some files that you may need or want to keep, such as cookies, history, downloads, etc. This is because it tries to free up disk space and protect your privacy by removing unnecessary files.
            • -
            • May cause some compatibility issues with other programs: AVG PC Tuneup 2019 may sometimes cause some compatibility issues with other programs on your PC, such as antivirus software, firewall software, or other optimization software. This is because it may interfere with their settings or functions.
            • -
            -

            Conclusion: Is AVG PC Tuneup 2019 worth it?

            -

            In conclusion, AVG PC Tuneup 2019 is a software that can help you improve your PC's performance, speed, stability, and security. It has over 40 tools and features that can help you clean, fix, and optimize your PC. It also has a crack and a keygen that can help you activate it for free. However, it also has some drawbacks that you should be aware of before using it. It may slow down some processes, delete some useful files, or cause some compatibility issues with other programs.

            -

            So, is AVG PC Tuneup 2019 worth it? Well, that depends on your needs and preferences. If you are looking for a simple and effective way to boost your PC's performance and protect your privacy and security, you may find AVG PC Tuneup 2019 useful and beneficial. However, if you are looking for a more advanced and customizable way to optimize your PC and avoid any potential problems, you may want to look for other alternatives or use AVG PC Tuneup 2019 with caution.

            -

            The choice is yours. You can download AVG PC Tuneup 2019 from the link below and try it out for yourself. You can also check out other reviews and feedback from other users and experts to get more insights and opinions. Whatever you decide, we hope this article has helped you learn more about AVG PC Tuneup 2019 and make an informed decision.

            -

            FAQs

            -

            Here are some frequently asked questions about AVG PC Tuneup 2019:

            -
              -
            1. Is AVG PC Tuneup 2019 safe to use?
            2. -

              Yes, AVG PC Tuneup 2019 is safe to use as long as you download it from the official website or from the link below. It does not contain any viruses, malware, spyware, or other harmful components. However, you should always scan any file you download with your antivirus software before running it.

              -
            3. Is AVG PC Tuneup 2019 free to use?
            4. -

              No, AVG PC Tuneup 2019 is not free to use. It has a trial version that you can use for 30 days without any limitations or restrictions. After that, you will need to purchase a license key to continue using it. However, you can also use a crack and a keygen to activate it for free. You can find them in the link below.

              -
            5. How do I uninstall AVG PC Tuneup 2019?
            6. -

              If you want to uninstall AVG PC Tuneup 2019 from your PC, you can follow these steps:

              -
                -
              • Go to Start > Control Panel > Programs > Uninstall a program.
              • -
              • Select AVG PC Tuneup 2019 from the list and click on Uninstall.
              • -
              • Follow the instructions on the screen to complete the uninstallation process.
              • -
              • Restart your PC.
              • -
              -
            7. How do I contact AVG support?
            8. -

              If you have any questions or issues with AVG PC Tuneup 2019 or any other AVG product, you can contact AVG support through these channels:

              -
                -
              • Email: support@avg.com
              • -
              • Phone: +1-844-234-6038 (USA), +44-800-085-4139 (UK), +61-1800-429-319 (Australia)
              • -
              • Live chat: https://support.avg.com/chat
              • -
              • Forum: https://support.avg.com/community
              • -
              -
            9. What are some alternatives to AVG PC Tuneup 2019?
            10. -

              If you are looking for some alternatives to AVG PC Tuneup 2019, you can check out these products:

              -
                -
              • CCleaner: A popular software that can help you clean and optimize your PC.
              • -
              • Iolo System Mechanic: A powerful software that can help you fix and speed up your PC.
              • -
              • Ashampoo WinOptimizer: A comprehensive software that can help you improve and maintain your PC.
              • -
              -

              0a6ba089eb
              -
              -
              \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Battlefield 4 Crack Serial Key Download the Full Version for Free Now.md b/spaces/raedeXanto/academic-chatgpt-beta/Battlefield 4 Crack Serial Key Download the Full Version for Free Now.md deleted file mode 100644 index 2b78f1a370a5b663df18181969715f4eb27c361e..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Battlefield 4 Crack Serial Key Download the Full Version for Free Now.md +++ /dev/null @@ -1,121 +0,0 @@ - -

              Battlefield 4 Crack Serial Key Updated Full Free Download

              -

              Are you a fan of first-person shooter games? Do you want to experience the thrill of realistic combat scenarios, dynamic destructible environments, and epic vehicular warfare? If yes, then you should definitely try Battlefield 4, one of the most popular and acclaimed games in the genre. However, if you don't want to spend a lot of money on buying the game or subscribing to Origin, you might be looking for a way to get a crack serial key for Battlefield 4. In this article, we will show you how to do that in three easy methods. But first, let's see what Battlefield 4 is and why you need a crack serial key for it.

              -

              Battlefield 4 Crack Serial Key Updated Full Free Download


              Downloadhttps://tinourl.com/2uL1px



              -

              Introduction

              -

              What is Battlefield 4?

              -

              Battlefield 4 is a first-person shooter game developed by EA DICE and published by Electronic Arts in 2013. It is the sequel to Battlefield 3 and the fourth main installment in the Battlefield series. The game features a single-player campaign that follows the story of a group of US soldiers who are caught in a global conflict between the US, China, and Russia. The game also features a multiplayer mode that supports up to 64 players on PC and allows them to choose from four classes: Assault, Engineer, Support, and Recon. The multiplayer mode also features various game modes, such as Conquest, Rush, Team Deathmatch, Domination, and more.

              -

              Battlefield 4 is known for its realistic graphics, immersive sound design, and dynamic gameplay that allows players to interact with the environment and use various vehicles and weapons. The game also features a new feature called Levolution, which enables players to trigger events that change the map layout and create new tactical opportunities. For example, players can collapse a skyscraper, flood a city, or destroy a dam.

              -

              Why do you need a crack serial key for Battlefield 4?

              -

              Battlefield 4 is a premium game that requires an Origin account and an activation code to play. The activation code is also known as a serial key or a CD key. It is a unique combination of letters and numbers that verifies that you have purchased a legitimate copy of the game. Without a valid serial key, you cannot install or play Battlefield 4 on your PC.

              -

              How to get Battlefield 4 Crack Serial Key for free
              -Battlefield 4 Crack Serial Key generator online
              -Battlefield 4 Crack Serial Key no survey no password
              -Download Battlefield 4 Crack Serial Key full version
              -Battlefield 4 Crack Serial Key activation code
              -Battlefield 4 Crack Serial Key license key
              -Battlefield 4 Crack Serial Key patch
              -Battlefield 4 Crack Serial Key torrent
              -Battlefield 4 Crack Serial Key working
              -Battlefield 4 Crack Serial Key latest update
              -Battlefield 4 Crack Serial Key reddit
              -Battlefield 4 Crack Serial Key youtube
              -Battlefield 4 Crack Serial Key review
              -Battlefield 4 Crack Serial Key tutorial
              -Battlefield 4 Crack Serial Key gameplay
              -Battlefield 4 Crack Serial Key features
              -Battlefield 4 Crack Serial Key system requirements
              -Battlefield 4 Crack Serial Key download link
              -Battlefield 4 Crack Serial Key installation guide
              -Battlefield 4 Crack Serial Key error fix
              -Battlefield 4 Crack Serial Key cheats
              -Battlefield 4 Crack Serial Key mods
              -Battlefield 4 Crack Serial Key multiplayer
              -Battlefield 4 Crack Serial Key single player
              -Battlefield 4 Crack Serial Key campaign
              -Battlefield 4 Crack Serial Key missions
              -Battlefield 4 Crack Serial Key weapons
              -Battlefield 4 Crack Serial Key vehicles
              -Battlefield 4 Crack Serial Key maps
              -Battlefield 4 Crack Serial Key graphics
              -Battlefield 4 Crack Serial Key sound
              -Battlefield 4 Crack Serial Key performance
              -Battlefield 4 Crack Serial Key optimization
              -Battlefield 4 Crack Serial Key comparison
              -Battlefield 4 Crack Serial Key vs original game
              -Battlefield 4 Crack Serial Key vs other cracks
              -Battlefield 4 Crack Serial Key pros and cons
              -Battlefield 4 Crack Serial Key benefits and drawbacks
              -Battlefield 4 Crack Serial Key advantages and disadvantages
              -Battlefield 4 Crack Serial Key testimonials and feedbacks
              -Battlefield 4 Crack Serial Key ratings and scores
              -Battlefield 4 Crack Serial Key quality and reliability
              -Battlefield 4 Crack Serial Key safety and security
              -Battlefield 4 Crack Serial Key legality and legitimacy
              -Battlefield 4 Crack Serial Key risks and dangers
              -Battlefield 4 Crack Serial Key alternatives and substitutes
              -Battlefield 4 Crack Serial Key recommendations and suggestions
              -Battlefield 4 Crack Serial Key tips and tricks
              -Battlefield 4 Crack Serial Key best practices and guidelines

              -

              However, not everyone can afford to buy Battlefield 4 or subscribe to Origin. Some people might also want to try the game before buying it or play it offline without any restrictions. That's why some people look for ways to get a crack serial key for Battlefield 4. A crack serial key is a fake or hacked serial key that bypasses the activation process and allows you to play Battlefield 4 without paying anything.

              -

              How to get a crack serial key for Battlefield 4?

              -

              There are three main methods to get a crack serial key for Battlefield 4: using a keygen, using a cracked multiplayer client, or buying a cheap CD key online. Let's see how each method works and what are the pros and cons of each one.

              -

              Method 1: Use a keygen

              -

A keygen is a small program that generates serial keys for games and applications. You can download a keygen for Battlefield 4 from various websites that offer cracked games and software. However, you should be careful when downloading and using a keygen, as it might contain viruses or malware that can harm your computer or steal your personal information. You should also scan the keygen with an antivirus program before running it.

              -

              To use a keygen for Battlefield 4, you need to follow these steps:

              -
                -
1. Download the archive that contains the keygen from a reliable source.
2. Extract the contents of the archive to a folder on your computer.
3. Open and run the keygen as administrator.
4. Click Generate and copy the generated serial key.
5. Redeem the game using the serial key on Origin or on the official website of Battlefield 4.
6. Install and play the game.
              -

              The advantage of using a keygen is that you can get a free serial key for Battlefield 4 without paying anything. The disadvantage is that you might get a fake or invalid serial key that won't work or will get banned by EA. You might also risk infecting your computer with malware or getting caught by anti-piracy measures.

              -

              Method 2: Use a cracked multiplayer client

              -

              A cracked multiplayer client is a modified version of the game that allows you to play online with other players who use the same client. You can download a cracked multiplayer client for Battlefield 4 from various websites that offer cracked games and software. However, you should be careful when downloading and using a cracked multiplayer client, as it might contain viruses or malware that can harm your computer or steal your personal information. You should also scan the cracked multiplayer client with an antivirus program before running it.

              -

              To use a cracked multiplayer client for Battlefield 4, you need to follow these steps:

              -
                -
1. Download the archive that contains the cracked multiplayer client from a reliable source.
2. Extract the contents of the archive to a folder on your computer.
3. Open and run ZClient.exe and log in with the email and password that you have created on ZLOEmu.org.
4. Now run the Origin.exe located in the same folder and change the download location of games to the parent folder of the folder where you have installed Battlefield 4.
5. Launch Battlefield 4 from Origin and enjoy playing online with other players who use the same client.
              -

              The advantage of using a cracked multiplayer client is that you can play online with other players without paying anything. The disadvantage is that you might not be able to play with players who use the official version of the game or access all the features and updates of the game. You might also risk infecting your computer with malware or getting caught by anti-piracy measures.

              -

              Method 3: Buy a cheap CD key online

              -

A cheap CD key is a serial key that is sold by third-party sellers at a lower price than the official one. You can buy a cheap CD key for Battlefield 4 from various websites that offer discounted games and software. However, you should be careful when buying and using a cheap CD key, as it might be stolen, used, or invalid. You should also check the reputation and reviews of the seller before making a purchase.

              -

              To buy and use a cheap CD key for Battlefield 4, you need to follow these steps:

              -
                -
1. Browse through different websites that offer cheap CD keys for Battlefield 4 and compare their prices and ratings.
2. Select a seller that has positive feedback and offers a secure payment method.
3. Buy the CD key and receive it via email or in your account on the website.
4. Redeem the game using the CD key on Origin or on the official website of Battlefield 4.
5. Install and play the game.
              -

              The advantage of buying a cheap CD key is that you can get a valid serial key for Battlefield 4 at a lower price than the official one. The disadvantage is that you might get scammed by a fraudulent seller or get banned by EA if they detect that your CD key is illegitimate.

              -

              Conclusion

              -

              Battlefield 4 is an amazing first-person shooter game that offers an immersive and thrilling gameplay experience. However, if you don't want to pay full price for it or subscribe to Origin, you might want to get a crack serial key for it. In this article, we have shown you three methods to do that: using a keygen, using a cracked multiplayer client, or buying a cheap CD key online. Each method has its pros and cons, so you should weigh them carefully before choosing one. We hope this article has helped you find a way to play Battlefield 4 without breaking your bank. Happy gaming!

              -

              FAQs

              -

              Q: Is it legal to use a crack serial key for Battlefield 4?

              -

              A: No, it is not legal to use a crack serial key for Battlefield 4. It is considered piracy and violates the terms of service of EA and Origin. You might face legal consequences if you get caught by anti-piracy measures.

              -

              Q: Is it safe to use a crack serial key for Battlefield 4?

              -

              A: No, it is not safe to use a crack serial key for Battlefield 4. You might expose your computer to viruses or malware that can harm your system or steal your personal information. You might also risk losing your account or getting banned by EA if they detect that your serial key is fake or hacked.

              -

              Q: Can I play online with a crack serial key for Battlefield 4?

              -

              A: It depends on which method you use to get a crack serial key for Battlefield 4. If you use a keygen, you will not be able to play online with other players who use the official version of the game. If you use a cracked multiplayer client, you will be able to play online with other players who use the same client. If you buy a cheap CD key online, you will be able to play online with other players who use the official version of the game.

              -

              Q: What are the advantages and disadvantages of each method?

              -

              A: The advantages and disadvantages of each method are summarized in the table below:

| Method | Advantages | Disadvantages |
| --- | --- | --- |
| Keygen | Free serial key | Fake or invalid serial key; virus or malware risk; no online play |
| Cracked multiplayer client | Free online play | Virus or malware risk; limited features and updates; no official online play |
| Cheap CD key online | Valid serial key; official online play | Scam or fraud risk; ban risk |

              Q: Where can I download a keygen, a cracked multiplayer client, or buy a cheap CD key online?

              -

A: You can download a keygen from various websites that offer cracked games and software. You can download a cracked multiplayer client from ZLOEmu.org. You can buy a cheap CD key online from various websites that offer discounted games and software. However, you should always be careful and check the reputation and reviews of the sources before downloading or buying anything.

              -

              0a6ba089eb
              -
              -
              \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Call of Duty WWII Digital Deluxe Edition RePack by BlackBox Crack Free Download Enjoy the Ultimate WWII Shooter Experience.md b/spaces/raedeXanto/academic-chatgpt-beta/Call of Duty WWII Digital Deluxe Edition RePack by BlackBox Crack Free Download Enjoy the Ultimate WWII Shooter Experience.md deleted file mode 100644 index f05dd6978c6987471b5ad2fdf8027310aafad1da..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Call of Duty WWII Digital Deluxe Edition RePack by BlackBox Crack Free Download Enjoy the Ultimate WWII Shooter Experience.md +++ /dev/null @@ -1,156 +0,0 @@ - -

              Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free

              -

              If you are a fan of first-person shooter games set in World War II, you might be interested in downloading Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free. This is a repack version of the popular game Call of Duty WWII, which includes all the DLCs, multiplayer and zombies modes, and a crack that allows you to play it without any restrictions. In this article, we will tell you more about this repack, its features, and how to download and install it on your PC.

              -

              Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free


              Download ✵✵✵ https://tinourl.com/2uL0FA



              -

              Introduction

              -

              Call of Duty WWII is a game that was released in 2017 by Activision. It is the fourteenth installment in the Call of Duty series and the first one to return to the World War II setting since 2008. The game has three modes: Campaign, Multiplayer and Zombies. The Campaign mode follows the story of a squad of American soldiers who fight in various battles across Europe. The Multiplayer mode offers a variety of modes and maps that are based on historical locations and events. The Zombies mode is a co-op mode that features a separate story and characters who have to survive against waves of undead Nazis.

              -

              What is Call of Duty WWII Digital Deluxe Edition?

              -

              The Digital Deluxe Edition of Call of Duty WWII is a special edition that includes the base game and some extra content. The extra content consists of:

              -
                -
• The Season Pass, which gives access to four DLC packs that add new maps, weapons, characters and zombies chapters.
• The Carentan map, which is a remake of a classic map from Call of Duty 2.
• The Nazi Zombies Camo, which is a weapon skin that can be used in multiplayer and zombies modes.
• The Divisions pack, which includes five outfits for each of the five divisions in multiplayer mode.
• The Call of Duty Endowment Fear Not and Bravery packs, which are cosmetic items that support veterans.
              -

              What is RePack by BlackBox?

              -

              RePack by BlackBox is a group of repackers who create compressed versions of games that are easy to install and run. They remove unnecessary files, such as languages, videos and sounds that are not used in the game. They also include cracks that bypass the DRM protection and allow users to play without any limitations. RePack by BlackBox is known for their high-quality repacks that have small sizes and fast installation times.

              -

              Why download Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free?

              -

              There are many reasons why you might want to download Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free. Some of them are:

              -
                -
• You can save money by not buying the original game and the DLCs.
• You can save space on your hard drive by downloading a smaller version of the game.
• You can save time by installing the game faster and easier.
• You can play the game offline without any internet connection.
• You can play the game with any language you want.
              -

              Features of Call of Duty WWII Digital Deluxe Edition RePack by BlackBox

              -

              Call of Duty WWII Digital Deluxe Edition RePack by BlackBox has many features that make it a great choice for gamers. Some of these features are:

              -

              Call of Duty WWII DODI Repack download
              -Call of Duty WWII FitGirl Repack torrent
              -Call of Duty WWII SoulFlyers crack only
              -Call of Duty WWII Digital Deluxe Edition all DLCs
              -Call of Duty WWII Multiplayer and Zombies mode
              -Call of Duty WWII build 7831931 latest version
              -Call of Duty WWII fast install repack
              -Call of Duty WWII free download full game
              -Call of Duty WWII MULTi12 languages
              -Call of Duty WWII Season Pass included
              -Call of Duty WWII The Resistance DLC download
              -Call of Duty WWII The War Machine DLC torrent
              -Call of Duty WWII United Front DLC crack
              -Call of Duty WWII Shadow War DLC free
              -Call of Duty WWII Endowment Fear Not Pack
              -Call of Duty WWII Endowment Bravery Pack
              -Call of Duty WWII Carentan Map download
              -Call of Duty WWII Nazi Zombies Camo
              -Call of Duty WWII Divisions Pack torrent
              -Call of Duty WWII Steam rip repack
              -Call of Duty WWII Sledgehammer Games developer
              -Call of Duty WWII Raven Software publisher
              -Call of Duty WWII Action Shooter genre
              -Call of Duty WWII 3D 1st Person perspective
              -Call of Duty WWII World War II setting
              -Call of Duty WWII minimum system requirements
              -Call of Duty WWII DirectX 11 compatible
              -Call of Duty WWII 90 GB storage space needed
              -Call of Duty WWII selective download feature
              -Call of Duty WWII lossless MD5 perfect repack
              -Call of Duty WWII nothing ripped or re-encoded
              -Call of Duty WWII skip multiplayer zombie files option
              -Call of Duty WWII English audio language available
              -Call of Duty WWII French audio language available
              -Call of Duty WWII Italian audio language available
              -Call of Duty WWII German audio language available
              -Call of Duty WWII Spanish audio language available
              -Call of Duty WWII Polish audio language available
              -Call of Duty WWII Portuguese-Brazil audio language available
              -Call of Duty WWII Russian audio language available
              -Call of Duty WWII Japanese interface language available
              -Call of Duty WWII Simplified Chinese interface language available
              -Call of Duty WWII Traditional Chinese interface language available
              -Call of Duty WWII Korean interface language available
              -How to install Call of Duty WWII repack guide
              -How to play Call of Duty WWII multiplayer online
              -How to fix Call of Duty WWII errors and bugs
              -How to update Call of Duty WWII to latest patch
              -How to change Call of Duty WWII language settings
              -How to uninstall Call of Duty WWII repack safely

              -

              High-quality graphics and sound

              -

              The game has stunning graphics that recreate the atmosphere and realism of World War II. The game uses an advanced engine that supports dynamic lighting, shadows, reflections, particle effects and more. The game also has immersive sound effects that enhance the gameplay experience. You can hear the bullets whizzing past your ears, the explosions shaking the ground, and the screams of your enemies and allies.

              -

              Three modes: Campaign, Multiplayer and Zombies

              -

              The game offers three different modes that cater to different tastes and preferences. The Campaign mode is a single-player mode that follows the story of a squad of American soldiers who fight in various battles across Europe. The campaign has 11 missions that take place in France, Belgium, Germany and more. The campaign also has cinematic cutscenes that show the drama and emotion of war. The Multiplayer mode is an online mode that allows you to compete with other players around the world. The multiplayer has several modes, such as Team Deathmatch, Domination, Capture the Flag and more. The multiplayer also has a progression system that lets you customize your character, weapons and skills. The Zombies mode is a co-op mode that lets you team up with up to three other players to fight against hordes of undead Nazis. The zombies mode has four chapters that have different settings, objectives and enemies. The zombies mode also has a perk system that gives you special abilities and power-ups.

              -

              All DLCs included

              -

              The repack includes all the DLCs that were released for the game. These DLCs are:

              -
                -
• DLC Pack 1: The Resistance - This DLC adds three new multiplayer maps (Anthropoid, Occupation and Valkyrie), a new war mode map (Operation Intercept) and a new zombies chapter (The Darkest Shore).
• DLC Pack 2: The War Machine - This DLC adds three new multiplayer maps (Dunkirk, Egypt and V2), a new war mode map (Operation Husky) and a new zombies chapter (The Shadowed Throne).
• DLC Pack 3: United Front - This DLC adds three new multiplayer maps (Market Garden, Monte Cassino and Stalingrad), a new war mode map (Operation Supercharge) and a new zombies chapter (The Tortured Path).
• DLC Pack 4: Shadow War - This DLC adds three new multiplayer maps (Airship, Chancellery and Excavation), a new war mode map (Operation Arcane) and a new zombies chapter (The Frozen Dawn).
              -

              Fast and easy installation

              -

              The repack has a fast and easy installation process that does not require any technical skills or knowledge. You just need to follow these steps:

              -
                -
1. Run the installer as administrator.
2. Click on the page icon.
3. Press the up arrow on your keyboard.
4. Click Install.
5. Click Continue.
6. Select installation destination.
7. Click Next.
8. Select components you want to install.
9. Wait for installation to finish.
              -

              Multi-language support

              -

              The repack supports multiple languages for both interface and audio. You can choose from 12 languages for interface: English, French, Italian, German, Spanish, Japanese, Polish, Portuguese-Brazilian, Russian, Simplified Chinese, Traditional Chinese or Korean. You can choose from 8 languages for audio: English, French, Italian, German, Spanish, Polish, Portuguese-Brazilian or Russian. You can also change languages anytime from the game settings menu. -

              How to download and install Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free?

              -

              If you want to download and install Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free, you need to meet some requirements and follow some steps. Here are they:

              -

              Requirements

              -

              To download and install Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free, you need to have a PC that meets the minimum system requirements for the game. These are:

| Requirement | Specification |
| --- | --- |
| OS | Windows 7 64-bit or later |
| CPU | Intel Core i3 3225 3.3 GHz or AMD Ryzen 5 1400 |
| RAM | 8 GB |
| GPU | NVIDIA GeForce GTX 660 @ 2 GB / GTX 1050 or AMD Radeon HD 7850 @ 2 GB / AMD RX 550 |
| DirectX | Version 11.0 compatible video card or equivalent |
| HDD | 90 GB available hard drive space |
| Network | Broadband internet connection |
| Sound card | DirectX compatible |
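Before you start a download of this size, it is worth confirming that your PC actually meets the table above. The snippet below is a small stand-alone Python sketch, not something that ships with the repack: the 90 GB figure comes from the HDD row of the table, and the install drive is only an example path that you should change to the drive you plan to use.

```python
# Stand-alone pre-install check (a sketch, not part of the repack).
# The 90 GB figure is taken from the HDD row of the requirements table above;
# the install drive below is an example (assumption) -- change it as needed.
# RAM (8 GB per the table) is easiest to check with the third-party psutil
# package, so it is left out of this stdlib-only sketch.
import platform
import shutil

INSTALL_DRIVE = "C:\\"      # example path (assumption)
REQUIRED_DISK_GB = 90       # from the HDD row of the table above


def check_disk(path: str, required_gb: int) -> bool:
    """Return True if the drive holding `path` has at least `required_gb` free."""
    free_gb = shutil.disk_usage(path).free / 1024 ** 3
    print(f"Free space on {path}: {free_gb:.1f} GB (need {required_gb} GB)")
    return free_gb >= required_gb


def check_64bit_os() -> bool:
    """Return True if this looks like a 64-bit OS (the table asks for Windows 7 64-bit or later)."""
    machine = platform.machine()
    print(f"OS: {platform.system()} {platform.release()}, architecture: {machine}")
    return machine.endswith("64")


if __name__ == "__main__":
    ok = check_disk(INSTALL_DRIVE, REQUIRED_DISK_GB) and check_64bit_os()
    print("Looks OK to install." if ok else "This PC does not meet the requirements above.")
```

Run it once before installing; if it reports a shortfall, free up space or pick another drive first.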

              Download links

              -

              To download Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free, you can use one of the following links:

              -
                -
• Torrent - Click here
• Direct link - Click here
• Gdrive - Click here
• OneDrive - Click here
              -

These links will take you to the download page of the repack, where you can choose the components and languages you want to download. The repack size starts at 69.6 GB, depending on your selection.

              -

              Installation steps

              -

              After downloading the repack, you need to install it on your PC. The installation steps are:

              -
                -
1. Run the installer as administrator.
2. Click on the page icon.
3. Press the up arrow on your keyboard.
4. Click Install.
5. Click Continue.
6. Select installation destination.
7. Click Next.
8. Select components you want to install.
9. Wait for installation to finish.
              -

              The installation time is from 20 to 40 minutes, depending on your PC specs and selection. After installation, you can run the game from the desktop shortcut or the game folder.

              -

              Conclusion

              -

              In this article, we have shown you how to download and install Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free. This is a great way to enjoy one of the best first-person shooter games set in World War II, with all the DLCs, multiplayer and zombies modes, and a crack that allows you to play without any restrictions. We hope you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.

              -

              Summary of the article

              -

              This article is about how to download and install Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free. It covers the following topics:

              -
                -
• What is Call of Duty WWII Digital Deluxe Edition and RePack by BlackBox?
• Why download Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free?
• What are the features of Call of Duty WWII Digital Deluxe Edition RePack by BlackBox?
• What are the requirements for Call of Duty WWII Digital Deluxe Edition RePack by BlackBox?
• What are the download links for Call of Duty WWII Digital Deluxe Edition RePack by BlackBox?
• What are the installation steps for Call of Duty WWII Digital Deluxe Edition RePack by BlackBox?
              -

              FAQs

              -

              Here are some frequently asked questions about Call of Duty WWII Digital Deluxe Edition RePack by BlackBox crack free:

              -
                -
1. Is this repack safe and virus-free?
   Yes, this repack is safe and virus-free. It has been tested and verified by many users and antivirus programs. However, some antivirus programs may detect the crack as a false positive, so you may need to disable them temporarily during installation.
2. Can I play online with this repack?
   Yes, you can play online with this repack, but only with other players who have the same repack and crack. You cannot play online with players who have the original game or a different repack or crack.
3. Can I update this repack?
   No, you cannot update this repack. If you update it, you will lose the crack and the game will not work. You will have to wait for a new repack that includes the latest updates and DLCs.
4. Can I change languages after installation?
   Yes, you can change languages after installation. You can do this from the game settings menu or by editing the language.txt file in the game folder (see the sketch after this list).
5. Can I request a different game repack?
   Yes, you can request a different game repack by leaving a comment below or contacting us via email. We will try our best to fulfill your request as soon as possible.
              -
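FAQ 4 above mentions changing the language by editing the language.txt file in the game folder. The exact format of that file is not documented in this article, so the following Python sketch simply assumes it holds a single language code on one line; the game folder path and the value are placeholder assumptions, and the script keeps a backup of the original file just in case.

```python
# Hypothetical helper for the language.txt tweak mentioned in FAQ 4 above.
# ASSUMPTIONS: the game folder path is an example, and language.txt is assumed
# to contain a single language code on one line -- check your own install first.
from pathlib import Path

GAME_DIR = Path(r"C:\Games\Call of Duty WWII")   # example path (assumption)
LANGUAGE = "english"                             # assumed value format


def set_language(game_dir: Path, language: str) -> None:
    """Write `language` to language.txt, keeping a one-time backup of the original."""
    lang_file = game_dir / "language.txt"
    backup = game_dir / "language.txt.bak"
    if lang_file.exists() and not backup.exists():
        backup.write_text(lang_file.read_text(encoding="utf-8"), encoding="utf-8")
    lang_file.write_text(language + "\n", encoding="utf-8")
    print(f"Wrote '{language}' to {lang_file}")


if __name__ == "__main__":
    set_language(GAME_DIR, LANGUAGE)
```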

              0a6ba089eb
              -
              -
              \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Creation Master 09 For Windows 7 64 Bit.md b/spaces/raedeXanto/academic-chatgpt-beta/Creation Master 09 For Windows 7 64 Bit.md deleted file mode 100644 index 1b9c6a5c36012777a04aff520e7f836009c5679e..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Creation Master 09 For Windows 7 64 Bit.md +++ /dev/null @@ -1,65 +0,0 @@ - -

              Creation Master 09 for Windows 7 64 bit: How to Install and Use It to Create Custom FIFA 09 Patches

              -

              If you are a fan of FIFA 09, you might have wondered how to customize your game with new players, teams, kits, stadiums, balls, and more. Well, there is a tool that can help you do that easily and quickly. It is called Creation Master 09, and it is one of the most popular and powerful editing tools for FIFA games.

              -

              Creation Master 09 For Windows 7 64 Bit


              DOWNLOAD ••• https://tinourl.com/2uL3LO



              -

              However, if you have a Windows 7 64 bit operating system, you might have encountered some problems when trying to install and run Creation Master 09. Don't worry, there are solutions for that too. In this article, we will show you how to install and use Creation Master 09 on Windows 7 64 bit without any issues. We will also show you how to use Creation Master 09 to create your own custom FIFA 09 patches.

              -

              So, let's get started!

              -

              Introduction

              -

              Before we dive into the installation and usage of Creation Master 09, let's first understand what it is and what are its benefits.

              -

              What is Creation Master 09 and what are its benefits?

              -

              Creation Master 09 is a software application that allows you to edit the FIFA 09 database and create custom patches for the game. It was developed by Rinaldo Zocca, also known as FIFA Master, a famous modder and programmer in the FIFA community. You can download Creation Master 09 from his official website [here].

              -

              Creation Master 09 has many benefits for FIFA 09 fans. With this tool, you can:

              -

              -
                -
• Add, modify, or delete players, teams, leagues, kits, faces, stadiums, balls, boots, banners, flags, logos, and more.
• Import and export graphics, sounds, and data from other FIFA games or external sources.
• Create your own tournaments, leagues, cups, and competitions with custom rules and formats.
• Change the gameplay settings, such as difficulty, speed, injuries, weather, etc.
• Test your changes in the game with the built-in launcher.
• Share your creations with other users online or offline.
              -

              As you can see, Creation Master 09 gives you a lot of freedom and creativity to customize your FIFA 09 game according to your preferences and tastes. It is a must-have tool for any FIFA 09 enthusiast.

              -

              What are the common issues and solutions for using Creation Master 09 on Windows 7 64 bit?

              -

              However, not everything is perfect with Creation Master 09. If you have a Windows 7 64 bit operating system, you might have faced some problems when trying to install and run Creation Master 09. Some of the common issues are:

              -
                -
• The setup file does not run or gives an error message.
• The program does not open or crashes frequently.
• The program does not save the changes or corrupts the database.
• The program does not recognize the FIFA 09 game folder or the registry keys.
              -

              Fortunately, there are solutions for these issues. Here are some of the steps you can take to fix them:

              -
                -
• Run the setup file as administrator and in Windows XP compatibility mode. To do this, right-click on the setup file and select Properties. Then go to the Compatibility tab and check the boxes for "Run this program as an administrator" and "Run this program in compatibility mode for: Windows XP (Service Pack 3)". Then click OK and run the setup file normally.
• Install the latest version of Creation Master 09 from a reliable source. The latest version is 9.0.0.0 and you can download it from [here]. Do not use older versions or unofficial versions as they might be incompatible or contain viruses.
• Update your FIFA 09 game to the latest patch. The latest patch is 1.2 and you can download it from [here]. This will ensure that your game is compatible with Creation Master 09 and prevent any conflicts or errors.
• Backup your FIFA 09 database before using Creation Master 09. This will allow you to restore your original database in case something goes wrong or you want to undo your changes. You can use Database Master 09, another tool from FIFA Master, to backup and restore your FIFA 09 database. You can download it from [here]. A simple way to script a raw file backup is sketched right after this list.
              -
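The last tip in the list above (backing up your FIFA 09 database) is easy to script. Database Master 09 is the proper tool for this, but if you just want a raw copy of the files, here is a minimal Python sketch; the FIFA 09 data path is only an assumed example, so point it at the folder your own install actually uses.

```python
# Minimal sketch of the "back up your FIFA 09 database first" advice above.
# ASSUMPTION: the source path is an example -- point it at the data folder of
# your own FIFA 09 install. Database Master 09 remains the supported way to do this.
import shutil
from datetime import datetime
from pathlib import Path

FIFA_DATA_DIR = Path(r"C:\Program Files (x86)\EA Sports\FIFA 09\Data")  # example path
BACKUP_ROOT = Path(r"C:\FIFA09_backups")                                # example path


def backup_database(src: Path, backup_root: Path) -> Path:
    """Copy the whole data folder into a new time-stamped backup folder."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    dest = backup_root / f"fifa09_db_{stamp}"
    backup_root.mkdir(parents=True, exist_ok=True)
    shutil.copytree(src, dest)   # raises if `src` does not exist, which is what we want
    print(f"Copied {src} -> {dest}")
    return dest


if __name__ == "__main__":
    backup_database(FIFA_DATA_DIR, BACKUP_ROOT)
```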

              If you follow these steps, you should be able to install and use Creation Master 09 on Windows 7 64 bit without any problems. However, if you still encounter any issues, you can visit the official forum of FIFA Master [here] and ask for help from other users or from Rinaldo himself.

              -

              What are the main features of Creation Master 09 and how to use them?

              -

              Now that you have installed Creation Master 09 on your Windows 7 64 bit system, you might be wondering how to use it to create custom FIFA 09 patches. Well, Creation Master 09 has many features and options that allow you to edit almost every aspect of the game. Here are some of the main features and how to use them:

• Editing players: add new players or modify existing ones; edit player attributes, skills, appearance, contracts, etc.; assign players to teams or transfer them between teams.
• Editing teams: add new teams or modify existing ones; edit team names, logos, kits, stadiums, etc.; assign teams to leagues or create custom leagues.
• Editing leagues: add new leagues or modify existing ones; edit league names, logos, flags, banners, etc.; create custom tournaments, cups, and competitions.
• Editing kits: add new kits or modify existing ones; edit kit colors, patterns, sponsors, numbers, etc.; import and export kits from other sources.
• Editing faces: add new faces or modify existing ones; edit face shapes, textures, hair, etc.; import and export faces from other sources.
• Editing stadiums: add new stadiums or modify existing ones; edit stadium names, locations, capacities, etc.; import and export stadiums from other sources.
• Editing balls: add new balls or modify existing ones; edit ball names, colors, logos, etc.; import and export balls from other sources.
• Editing gameplay settings: change the gameplay settings such as difficulty, speed, injuries, weather, etc.; create custom gameplay sliders and profiles; test the gameplay changes in the game.

To use these features, you need to open an existing FIFA 09 database or create a new one. You can do this by clicking on the File menu and selecting Open FIFA 09 or New FIFA 09. Then you can browse through the tabs and menus of Creation Master 09 and select the items you want to edit. You can also use the search function or the filters to find the items you are looking for. You can use the buttons and sliders on the right panel to edit the properties and values of the items. You can also use the preview window on the bottom panel to see how the items look in the game.

When you are done editing, you need to save your changes and export your patch. You can do this by clicking on the File menu and selecting Save FIFA 09 or Export Patch. You can choose to export your patch as a .cmp file or a .exe file. A .cmp file is a compressed file that contains only the changes you made. You can use it to install your patch on your FIFA 09 game folder or share it with other users who have Creation Master 09. A .exe file is an executable file that contains your entire FIFA 09 database. You can use it to install your patch on any FIFA 09 game folder or share it with other users who do not have Creation Master 09.

              Conclusion

              -

              In this article, we have shown you how to install and use Creation Master 09 on Windows 7 64 bit. We have also shown you how to use Creation Master 09 to create custom FIFA 09 patches. We hope that you have found this article useful and informative.

              -

              Creation Master 09 is a great tool for FIFA 09 fans who want to customize their game with new players, teams, kits, stadiums, balls, and more. It is easy to use and has many features and options that allow you to edit almost every aspect of the game. It is also compatible with Windows 7 64 bit if you follow some simple steps.

              -

              So, what are you waiting for? Download Creation Master 09 today and start creating your own custom FIFA 09 patches. You will be amazed by what you can do with this tool. You will also have a lot of fun and satisfaction in making your FIFA 09 game more personal and unique.

              -

              Here are some links to useful resources and tutorials for further learning:

              -
                -
• [FIFA Master official website]: Here you can download Creation Master 09 and other tools from FIFA Master. You can also find manuals, tutorials, videos, news, updates, and support.
• [FIFA Infinity]: Here you can find many custom FIFA 09 patches created by other users using Creation Master 09. You can also find tutorials, forums, downloads, news, and more.
• [Soccer Gaming]: Here you can find another community of FIFA 09 modders and fans. You can also find tutorials, forums, downloads, news, and more.
              -

              FAQs

              -

              Here are some frequently asked questions about Creation Master 09:

              -
                -
1. What are the system requirements for running Creation Master 09?
   You need a PC with Windows XP, Vista, or 7 (32 or 64 bit), at least 512 MB of RAM, and at least 1 GB of free disk space.
2. Where can I download the latest version of Creation Master 09?
   You can download it from [FIFA Infinity], [Soccer Gaming], or [Mod DB].
3. How can I run Creation Master 09 on Windows XP SP3?
   You need to install a patch that fixes the "no sliding tackle" glitch. You can find it [here].
4. How can I backup my FIFA 09 database before using Creation Master 09?
   You can use Database Master 09, another tool from FIFA Master, to backup and restore your FIFA 09 database. You can download it from [here].
5. How can I share my custom FIFA 09 patches with other users?
   You can upload your .cmp or .exe files to online file hosting services like [Dropbox](https://www.dropbox.com/), [Mediafire](https://www.mediafire.com/), or [Google Drive](https://www.google.com/drive/). Then you can share the download links on forums like [FIFA Infinity] or [Soccer Gaming].

              b2dd77e56b
              -
              -
              \ No newline at end of file diff --git a/spaces/rainy3/chatgpt_academic/crazy_functions/test_project/cpp/cppipc/queue.h b/spaces/rainy3/chatgpt_academic/crazy_functions/test_project/cpp/cppipc/queue.h deleted file mode 100644 index a21f3446e06b5826af7b554c8a7d9c5d80848b62..0000000000000000000000000000000000000000 --- a/spaces/rainy3/chatgpt_academic/crazy_functions/test_project/cpp/cppipc/queue.h +++ /dev/null @@ -1,216 +0,0 @@ -#pragma once - -#include -#include -#include // [[since C++14]]: std::exchange -#include -#include -#include -#include -#include -#include -#include // assert - -#include "libipc/def.h" -#include "libipc/shm.h" -#include "libipc/rw_lock.h" - -#include "libipc/utility/log.h" -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" - -namespace ipc { -namespace detail { - -class queue_conn { -protected: - circ::cc_t connected_ = 0; - shm::handle elems_h_; - - template - Elems* open(char const * name) { - if (name == nullptr || name[0] == '\0') { - ipc::error("fail open waiter: name is empty!\n"); - return nullptr; - } - if (!elems_h_.acquire(name, sizeof(Elems))) { - return nullptr; - } - auto elems = static_cast(elems_h_.get()); - if (elems == nullptr) { - ipc::error("fail acquire elems: %s\n", name); - return nullptr; - } - elems->init(); - return elems; - } - - void close() { - elems_h_.release(); - } - -public: - queue_conn() = default; - queue_conn(const queue_conn&) = delete; - queue_conn& operator=(const queue_conn&) = delete; - - bool connected() const noexcept { - return connected_ != 0; - } - - circ::cc_t connected_id() const noexcept { - return connected_; - } - - template - auto connect(Elems* elems) noexcept - /*needs 'optional' here*/ - -> std::tuple().cursor())> { - if (elems == nullptr) return {}; - // if it's already connected, just return - if (connected()) return {connected(), false, 0}; - connected_ = elems->connect_receiver(); - return {connected(), true, elems->cursor()}; - } - - template - bool disconnect(Elems* elems) noexcept { - if (elems == nullptr) return false; - // if it's already disconnected, just return false - if (!connected()) return false; - elems->disconnect_receiver(std::exchange(connected_, 0)); - return true; - } -}; - -template -class queue_base : public queue_conn { - using base_t = queue_conn; - -public: - using elems_t = Elems; - using policy_t = typename elems_t::policy_t; - -protected: - elems_t * elems_ = nullptr; - decltype(std::declval().cursor()) cursor_ = 0; - bool sender_flag_ = false; - -public: - using base_t::base_t; - - queue_base() = default; - - explicit queue_base(char const * name) - : queue_base{} { - elems_ = open(name); - } - - explicit queue_base(elems_t * elems) noexcept - : queue_base{} { - assert(elems != nullptr); - elems_ = elems; - } - - /* not virtual */ ~queue_base() { - base_t::close(); - } - - elems_t * elems() noexcept { return elems_; } - elems_t const * elems() const noexcept { return elems_; } - - bool ready_sending() noexcept { - if (elems_ == nullptr) return false; - return sender_flag_ || (sender_flag_ = elems_->connect_sender()); - } - - void shut_sending() noexcept { - if (elems_ == nullptr) return; - if (!sender_flag_) return; - elems_->disconnect_sender(); - } - - bool connect() noexcept { - auto tp = base_t::connect(elems_); - if (std::get<0>(tp) && std::get<1>(tp)) { - cursor_ = std::get<2>(tp); - return true; - } - return std::get<0>(tp); - } - - bool disconnect() noexcept { - return base_t::disconnect(elems_); - } - - std::size_t conn_count() const 
noexcept { - return (elems_ == nullptr) ? static_cast(invalid_value) : elems_->conn_count(); - } - - bool valid() const noexcept { - return elems_ != nullptr; - } - - bool empty() const noexcept { - return !valid() || (cursor_ == elems_->cursor()); - } - - template - bool push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward

              (params)...); - }); - } - - template - bool force_push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->force_push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward

              (params)...); - }); - } - - template - bool pop(T& item, F&& out) { - if (elems_ == nullptr) { - return false; - } - return elems_->pop(this, &(this->cursor_), [&item](void* p) { - ::new (&item) T(std::move(*static_cast(p))); - }, std::forward(out)); - } -}; - -} // namespace detail - -template -class queue final : public detail::queue_base> { - using base_t = detail::queue_base>; - -public: - using value_t = T; - - using base_t::base_t; - - template - bool push(P&&... params) { - return base_t::template push(std::forward

              (params)...); - } - - template - bool force_push(P&&... params) { - return base_t::template force_push(std::forward

              (params)...); - } - - bool pop(T& item) { - return base_t::pop(item, [](bool) {}); - } - - template - bool pop(T& item, F&& out) { - return base_t::pop(item, std::forward(out)); - } -}; - -} // namespace ipc diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@notionhq/client/build/src/fetch-types.js b/spaces/rayan-saleh/whisper2notion/server/node_modules/@notionhq/client/build/src/fetch-types.js deleted file mode 100644 index 616cef0feca4fb785c474ed6e20e79adf5900af7..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@notionhq/client/build/src/fetch-types.js +++ /dev/null @@ -1,3 +0,0 @@ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -//# sourceMappingURL=fetch-types.js.map \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Basketball Scoreboard Pro 2.0.9 Crack High Qualityed.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Basketball Scoreboard Pro 2.0.9 Crack High Qualityed.md deleted file mode 100644 index 8762a014a3e37bf656b8ff26c9568501bfc58993..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Basketball Scoreboard Pro 2.0.9 Crack High Qualityed.md +++ /dev/null @@ -1,6 +0,0 @@ -

              basketball scoreboard pro 2.0.9 cracked


              Download File >>>>> https://urlgoal.com/2uCMCZ



              - -B.M.I. Calculator Pro for Windows 8 1.0.0.6 :: 2013-09-27 B.O.B. Rapid Browser ... Basketball Scoreboard Standard 2.0.9 :: 2015-09-09. Basketball Scoreboard ... 1fdad05405
              -
              -
              -

              diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Birds Evolution Pro Crack.rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Birds Evolution Pro Crack.rar.md deleted file mode 100644 index 0c66a0c9df2d2930031e431691a923cd65243dfb..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Birds Evolution Pro Crack.rar.md +++ /dev/null @@ -1,10 +0,0 @@ -
              -

              reduced wing loading is a common adaptation of early birds that is. 2010, you make the comments at the bottom of the story. king’s dead bird a trophy for. bird chooses mates after seeing wing length then. https://coub.com/stories/3137677-chrome-70-crack-new-versions.com/stories/3414151-assessment-of-the-diet-of-the-brown-owl-man-white-ba.

              -

              Birds Evolution Pro crack.rar


              Download Ziphttps://urlgoal.com/2uCJfL



              -

              the first day of school was fine until. 1991, only 20 years before our study, in the same. the evolutionary emergence of herbivory in sauropodomorph birds. https://coub.com/stories/3174270-unlocking-the-lovely-new-version-of-motor-pro-7-ios. you can use one of the cracks at the bottom of the.

              -

              eredation is what happens when an offspring is produced by a parent that is an ancestor of the offspring and the offspring is either the same or a genetically different, independent individual as the parent.. the degree of morphological integration shows that, at least for ants. the species' main breeding habitat is ____. and the species has ____ degrees of morphological integration with its host plant. doing his research, he soon discovered that in the same region as dusty south, in georgia.bird cracker pdf visit pigpens. pdf.

              -

              enation, individual variations, the need for shelter and breeding grounds, influences on offspring development, and morphological integration. here is an example of the figure showing that influence that. free download crack for windows 7 pro 2012.rar

              -

              -

              he species' main breeding habitat is ____.. and the species has ____ degrees of morphological integration with its host plant. . he showed that as the population of the geometrid moth increased, the level of parasitism by the braconid wasp also increased until finally so many parasitoids were present that the host population. was eliminated. as the parasitoid population grew, the prey.

              899543212b
              -
              -
              \ No newline at end of file diff --git a/spaces/riccorl/relik-entity-linking/relik/reader/utils/save_load_utilities.py b/spaces/riccorl/relik-entity-linking/relik/reader/utils/save_load_utilities.py deleted file mode 100644 index 1e635650c1f69c0e223d268f97ec9d6e0677742c..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/reader/utils/save_load_utilities.py +++ /dev/null @@ -1,76 +0,0 @@ -import argparse -import os -from typing import Tuple - -import omegaconf -import torch - -from relik.common.utils import from_cache -from relik.reader.lightning_modules.relik_reader_pl_module import RelikReaderPLModule -from relik.reader.relik_reader_core import RelikReaderCoreModel - -CKPT_FILE_NAME = "model.ckpt" -CONFIG_FILE_NAME = "cfg.yaml" - - -def convert_pl_module(pl_module_ckpt_path: str, output_dir: str) -> None: - if not os.path.exists(output_dir): - os.makedirs(output_dir) - else: - print(f"{output_dir} already exists, aborting operation") - exit(1) - - relik_pl_module: RelikReaderPLModule = RelikReaderPLModule.load_from_checkpoint( - pl_module_ckpt_path - ) - torch.save( - relik_pl_module.relik_reader_core_model, f"{output_dir}/{CKPT_FILE_NAME}" - ) - with open(f"{output_dir}/{CONFIG_FILE_NAME}", "w") as f: - omegaconf.OmegaConf.save( - omegaconf.OmegaConf.create(relik_pl_module.hparams["cfg"]), f - ) - - -def load_model_and_conf( - model_dir_path: str, -) -> Tuple[RelikReaderCoreModel, omegaconf.DictConfig]: - # TODO: quick workaround to load the model from HF hub - model_dir = from_cache( - model_dir_path, - filenames=[CKPT_FILE_NAME, CONFIG_FILE_NAME], - cache_dir=None, - force_download=False, - ) - - ckpt_path = f"{model_dir}/{CKPT_FILE_NAME}" - model = torch.load(ckpt_path, map_location=torch.device("cpu")) - - model_cfg_path = f"{model_dir}/{CONFIG_FILE_NAME}" - model_conf = omegaconf.OmegaConf.load(model_cfg_path) - return model, model_conf - - -def parse_arg() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument( - "--ckpt", - help="Path to the pytorch lightning ckpt you want to convert.", - required=True, - ) - parser.add_argument( - "--output-dir", - "-o", - help="The output dir to store the bare models and the config.", - required=True, - ) - return parser.parse_args() - - -def main(): - args = parse_arg() - convert_pl_module(args.ckpt, args.output_dir) - - -if __name__ == "__main__": - main() diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/bbox/match_costs/__init__.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/bbox/match_costs/__init__.py deleted file mode 100644 index 1b636795082cf7b731e3125f7ae36b51e4bfb5a3..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/bbox/match_costs/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .builder import build_match_cost -from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost, - DiceCost, FocalLossCost, IoUCost) - -__all__ = [ - 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', - 'FocalLossCost', 'DiceCost', 'CrossEntropyLossCost' -] diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/hdetr/models/segmentation.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/hdetr/models/segmentation.py deleted file mode 100644 index 18c70cca99a5bb274b2d77298ac236d75663cc28..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/hdetr/models/segmentation.py +++ /dev/null @@ -1,427 +0,0 @@ -# ------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# ------------------------------------------------------------------------ - -""" -This file provides the definition of the convolutional heads used to predict masks, as well as the losses -""" -import io -from collections import defaultdict - -import torch -import torch.nn as nn -import torch.nn.functional as F -from PIL import Image - -from .util import box_ops -from .util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list - -try: - from panopticapi.utils import id2rgb, rgb2id -except ImportError: - pass - - -class DETRsegm(nn.Module): - def __init__(self, detr, freeze_detr=False): - super().__init__() - self.detr = detr - - if freeze_detr: - for p in self.parameters(): - p.requires_grad_(False) - - hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead - self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0) - self.mask_head = MaskHeadSmallConv( - hidden_dim + nheads, [1024, 512, 256], hidden_dim - ) - - def forward(self, samples: NestedTensor): - if not isinstance(samples, NestedTensor): - samples = nested_tensor_from_tensor_list(samples) - features, pos = self.detr.backbone(samples) - - bs = features[-1].tensors.shape[0] - - src, mask = features[-1].decompose() - src_proj = self.detr.input_proj(src) - hs, memory = self.detr.transformer( - src_proj, mask, self.detr.query_embed.weight, pos[-1] - ) - - outputs_class = self.detr.class_embed(hs) - outputs_coord = self.detr.bbox_embed(hs).sigmoid() - out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]} - if self.detr.aux_loss: - out["aux_outputs"] = [ - {"pred_logits": a, "pred_boxes": b} - for a, b in zip(outputs_class[:-1], outputs_coord[:-1]) - ] - - # FIXME h_boxes takes the last one computed, keep this in mind - bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask) - - seg_masks = self.mask_head( - src_proj, - bbox_mask, - [features[2].tensors, features[1].tensors, features[0].tensors], - ) - outputs_seg_masks = seg_masks.view( - bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1] - ) - - out["pred_masks"] = outputs_seg_masks - return out - - -class MaskHeadSmallConv(nn.Module): - """ - Simple convolutional head, using group norm. 
- Upsampling is done using a FPN approach - """ - - def __init__(self, dim, fpn_dims, context_dim): - super().__init__() - - inter_dims = [ - dim, - context_dim // 2, - context_dim // 4, - context_dim // 8, - context_dim // 16, - context_dim // 64, - ] - self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1) - self.gn1 = torch.nn.GroupNorm(8, dim) - self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1) - self.gn2 = torch.nn.GroupNorm(8, inter_dims[1]) - self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) - self.gn3 = torch.nn.GroupNorm(8, inter_dims[2]) - self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) - self.gn4 = torch.nn.GroupNorm(8, inter_dims[3]) - self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) - self.gn5 = torch.nn.GroupNorm(8, inter_dims[4]) - self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1) - - self.dim = dim - - self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1) - self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1) - self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_uniform_(m.weight, a=1) - nn.init.constant_(m.bias, 0) - - def forward(self, x, bbox_mask, fpns): - def expand(tensor, length): - return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) - - x = torch.cat([expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) - - x = self.lay1(x) - x = self.gn1(x) - x = F.relu(x) - x = self.lay2(x) - x = self.gn2(x) - x = F.relu(x) - - cur_fpn = self.adapter1(fpns[0]) - if cur_fpn.size(0) != x.size(0): - cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0)) - x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") - x = self.lay3(x) - x = self.gn3(x) - x = F.relu(x) - - cur_fpn = self.adapter2(fpns[1]) - if cur_fpn.size(0) != x.size(0): - cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0)) - x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") - x = self.lay4(x) - x = self.gn4(x) - x = F.relu(x) - - cur_fpn = self.adapter3(fpns[2]) - if cur_fpn.size(0) != x.size(0): - cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0)) - x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") - x = self.lay5(x) - x = self.gn5(x) - x = F.relu(x) - - x = self.out_lay(x) - return x - - -class MHAttentionMap(nn.Module): - """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" - - def __init__(self, query_dim, hidden_dim, num_heads, dropout=0, bias=True): - super().__init__() - self.num_heads = num_heads - self.hidden_dim = hidden_dim - self.dropout = nn.Dropout(dropout) - - self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) - self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) - - nn.init.zeros_(self.k_linear.bias) - nn.init.zeros_(self.q_linear.bias) - nn.init.xavier_uniform_(self.k_linear.weight) - nn.init.xavier_uniform_(self.q_linear.weight) - self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 - - def forward(self, q, k, mask=None): - q = self.q_linear(q) - k = F.conv2d( - k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias - ) - qh = q.view( - q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads - ) - kh = k.view( - k.shape[0], - self.num_heads, - self.hidden_dim // self.num_heads, - k.shape[-2], - k.shape[-1], - ) - weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh) - - if mask 
is not None: - weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf")) - weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights) - weights = self.dropout(weights) - return weights - - -def dice_loss(inputs, targets, num_boxes): - """ - Compute the DICE loss, similar to generalized IOU for masks - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - """ - inputs = inputs.sigmoid() - inputs = inputs.flatten(1) - numerator = 2 * (inputs * targets).sum(1) - denominator = inputs.sum(-1) + targets.sum(-1) - loss = 1 - (numerator + 1) / (denominator + 1) - return loss.sum() / num_boxes - - -def sigmoid_focal_loss( - inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2 -): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - return loss.mean(1).sum() / num_boxes - - -class PostProcessSegm(nn.Module): - def __init__(self, threshold=0.5): - super().__init__() - self.threshold = threshold - - @torch.no_grad() - def forward(self, results, outputs, orig_target_sizes, max_target_sizes): - assert len(orig_target_sizes) == len(max_target_sizes) - max_h, max_w = max_target_sizes.max(0)[0].tolist() - outputs_masks = outputs["pred_masks"].squeeze(2) - outputs_masks = F.interpolate( - outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False - ) - outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu() - - for i, (cur_mask, t, tt) in enumerate( - zip(outputs_masks, max_target_sizes, orig_target_sizes) - ): - img_h, img_w = t[0], t[1] - results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) - results[i]["masks"] = F.interpolate( - results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" - ).byte() - - return results - - -class PostProcessPanoptic(nn.Module): - """This class converts the output of the model to the final panoptic result, in the format expected by the - coco panoptic API """ - - def __init__(self, is_thing_map, threshold=0.85): - """ - Parameters: - is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether - the class is a thing (True) or a stuff (False) class - threshold: confidence threshold: segments with confidence lower than this will be deleted - """ - super().__init__() - self.threshold = threshold - self.is_thing_map = is_thing_map - - def forward(self, outputs, processed_sizes, target_sizes=None): - """ This function computes the panoptic prediction from the model's predictions. 
- Parameters: - outputs: This is a dict coming directly from the model. See the model doc for the content. - processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the - model, ie the size after data augmentation but before batching. - target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size - of each prediction. If left to None, it will default to the processed_sizes - """ - if target_sizes is None: - target_sizes = processed_sizes - assert len(processed_sizes) == len(target_sizes) - out_logits, raw_masks, raw_boxes = ( - outputs["pred_logits"], - outputs["pred_masks"], - outputs["pred_boxes"], - ) - assert len(out_logits) == len(raw_masks) == len(target_sizes) - preds = [] - - def to_tuple(tup): - if isinstance(tup, tuple): - return tup - return tuple(tup.cpu().tolist()) - - for cur_logits, cur_masks, cur_boxes, size, target_size in zip( - out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes - ): - # we filter empty queries and detection below threshold - scores, labels = cur_logits.softmax(-1).max(-1) - keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & ( - scores > self.threshold - ) - cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) - cur_scores = cur_scores[keep] - cur_classes = cur_classes[keep] - cur_masks = cur_masks[keep] - cur_masks = interpolate( - cur_masks[None], to_tuple(size), mode="bilinear" - ).squeeze(0) - cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep]) - - h, w = cur_masks.shape[-2:] - assert len(cur_boxes) == len(cur_classes) - - # It may be that we have several predicted masks for the same stuff class. - # In the following, we track the list of masks ids for each stuff class (they are merged later on) - cur_masks = cur_masks.flatten(1) - stuff_equiv_classes = defaultdict(lambda: []) - for k, label in enumerate(cur_classes): - if not self.is_thing_map[label.item()]: - stuff_equiv_classes[label.item()].append(k) - - def get_ids_area(masks, scores, dedup=False): - # This helper function creates the final panoptic segmentation image - # It also returns the area of the masks that appears on the image - - m_id = masks.transpose(0, 1).softmax(-1) - - if m_id.shape[-1] == 0: - # We didn't detect any mask :( - m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) - else: - m_id = m_id.argmax(-1).view(h, w) - - if dedup: - # Merge the masks corresponding to the same stuff class - for equiv in stuff_equiv_classes.values(): - if len(equiv) > 1: - for eq_id in equiv: - m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) - - final_h, final_w = to_tuple(target_size) - - seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy())) - seg_img = seg_img.resize( - size=(final_w, final_h), resample=Image.NEAREST - ) - - np_seg_img = ( - torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())) - .view(final_h, final_w, 3) - .numpy() - ) - m_id = torch.from_numpy(rgb2id(np_seg_img)) - - area = [] - for i in range(len(scores)): - area.append(m_id.eq(i).sum().item()) - return area, seg_img - - area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) - if cur_classes.numel() > 0: - # We know filter empty masks as long as we find some - while True: - filtered_small = torch.as_tensor( - [area[i] <= 4 for i, c in enumerate(cur_classes)], - dtype=torch.bool, - device=keep.device, - ) - if filtered_small.any().item(): - cur_scores = cur_scores[~filtered_small] - cur_classes = cur_classes[~filtered_small] - cur_masks = cur_masks[~filtered_small] - area, 
seg_img = get_ids_area(cur_masks, cur_scores) - else: - break - - else: - cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device) - - segments_info = [] - for i, a in enumerate(area): - cat = cur_classes[i].item() - segments_info.append( - { - "id": i, - "isthing": self.is_thing_map[cat], - "category_id": cat, - "area": a, - } - ) - del cur_classes - - with io.BytesIO() as out: - seg_img.save(out, format="PNG") - predictions = { - "png_string": out.getvalue(), - "segments_info": segments_info, - } - preds.append(predictions) - return preds diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download and Install Jeppesen Mobile FD with IDM and Use Serial Number Crack to Unlock All Features.md b/spaces/rorallitri/biomedical-language-models/logs/Download and Install Jeppesen Mobile FD with IDM and Use Serial Number Crack to Unlock All Features.md deleted file mode 100644 index 262c6ae022208f67517ebb7bae3bce17bb49b7f2..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Download and Install Jeppesen Mobile FD with IDM and Use Serial Number Crack to Unlock All Features.md +++ /dev/null @@ -1,6 +0,0 @@ -

              jeppesen mobile fd serial number crack for idm


              DOWNLOAD »»» https://tinurll.com/2uzoyU



              - - aaccfb2cb3
              -
              -
              -

              diff --git a/spaces/rorallitri/biomedical-language-models/logs/Evermotion Archmodels Vol 105 59 Detailed 3D Models of Plants Fountains Chairs and Bridges.md b/spaces/rorallitri/biomedical-language-models/logs/Evermotion Archmodels Vol 105 59 Detailed 3D Models of Plants Fountains Chairs and Bridges.md deleted file mode 100644 index 8c09db383be34a19c914e9332fd45975992ab7e6..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Evermotion Archmodels Vol 105 59 Detailed 3D Models of Plants Fountains Chairs and Bridges.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Evermotion Archmodels Vol 105


              Download File ->->->-> https://tinurll.com/2uzmGj



              - - aaccfb2cb3
              -
              -
              -

              diff --git a/spaces/rorallitri/biomedical-language-models/logs/HDClone Enterprise Edition 4.2 Crack _HOT_.md b/spaces/rorallitri/biomedical-language-models/logs/HDClone Enterprise Edition 4.2 Crack _HOT_.md deleted file mode 100644 index ccdbec87097a0c516a2bd34dcda24378b7be8f7e..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/HDClone Enterprise Edition 4.2 Crack _HOT_.md +++ /dev/null @@ -1,9 +0,0 @@ - -

              Embrace the world of cool. HDClone Enterprise Edition is the perfect tool for backups and for creating copies of entire software or operating system installations. Parallel mass copies and deployment. Creating up to 4, 8, or 16 clones in one run, depending on variant. Directly or from an image. Perfect for production environments.

              -

              HDClone Enterprise Edition 4.2 Crack


              Download File https://tinurll.com/2uzomb



              -

              HDClone is one of the most popular file backup programs. You can use this software to make a backup of your important files on your hard disk and external memory (USB). The working environment of this software is very simple and convenient, and you can easily back up your files. A special feature of this software is the safe rescue feature. With this feature, you can see the problems of your hard drive and fix them.

              -

              HDClone Enterprise Edition is one of the best and most popular programs for backing up and for creating copies of entire software or operating system installations. Parallel mass copies and deployment. Creating up to 4, 8, or 16 clones in one run, depending on variant. Directly or from an image. Perfect for production environments. This version really works for me. Ignore any warning; it may try to stop, but continue. Works perfectly.

              -

              Create copies of entire hard disks or mass storage media as a backup. HDClone Enterprise Edition 4.2 Crack works independently of the partitioning scheme, file system, and operating system. It also works with proprietary formats that would otherwise be inaccessible. Use the convenient built-in browser to easily configure and perform parallel backups and clones on multiple hard disks and other media.

              -

              899543212b
              -
              -
              \ No newline at end of file diff --git a/spaces/rti-international/rota-app/ABOUT.md b/spaces/rti-international/rota-app/ABOUT.md deleted file mode 100644 index e6117f755c62ad94f0e4b143b6eb6af60d8083cd..0000000000000000000000000000000000000000 --- a/spaces/rti-international/rota-app/ABOUT.md +++ /dev/null @@ -1,20 +0,0 @@ -# ROTA -## Rapid Offense Text Autocoder - -### ℹ️ Intro - -[![HuggingFace Models](https://img.shields.io/badge/%F0%9F%A4%97%20models-2021.05.18.15-blue)](https://huggingface.co/rti-international/rota) -[![GitHub Model Release](https://img.shields.io/github/v/release/RTIInternational/rota?logo=github)](https://github.com/RTIInternational/rota) -[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4770492.svg)](https://doi.org/10.5281/zenodo.4770492) - -Criminal justice research often requires conversion of free-text offense descriptions into overall charge categories to aid analysis. For example, the free-text offense of "eluding a police vehicle" would be coded to a charge category of "Obstruction - Law Enforcement". Since free-text offense descriptions aren't standardized and often need to be categorized in large volumes, this can result in a manual and time intensive process for researchers. ROTA is a machine learning model for converting offense text into offense codes. - -Currently ROTA predicts the *Charge Category* of a given offense text. A *charge category* is one of the headings for offense codes in the [2009 NCRP Codebook: Appendix F](https://www.icpsr.umich.edu/web/NACJD/studies/30799/datadocumentation#). - -The model was trained on [publicly available data](https://web.archive.org/web/20201021001250/https://www.icpsr.umich.edu/web/pages/NACJD/guides/ncrp.html) from a crosswalk containing offenses from all 50 states combined with three additional hand-labeled offense text datasets. - -For more information on the model, please see the [model repo](https://huggingface.co/rti-international/rota). - -This model and application were developed by the [RTI International Center for Data Science and AI](https://www.rti.org/centers/rti-center-data-science). - -### ℹ️ Use \ No newline at end of file diff --git a/spaces/rubinmc/Image-Animation-using-Thin-Plate-Spline-Motion-Modeldfdfdddddddddddddddddddddd/style.css b/spaces/rubinmc/Image-Animation-using-Thin-Plate-Spline-Motion-Modeldfdfdddddddddddddddddddddd/style.css deleted file mode 100644 index 435ebb5987b8913a52f73664c54022374d0c3ed7..0000000000000000000000000000000000000000 --- a/spaces/rubinmc/Image-Animation-using-Thin-Plate-Spline-Motion-Modeldfdfdddddddddddddddddddddd/style.css +++ /dev/null @@ -1,19 +0,0 @@ -h1 { - text-align: center; -} -img#overview { - max-width: 1000px; - max-height: 600px; - display: block; - margin: auto; -} -img#style-image { - max-width: 1000px; - max-height: 600px; - display: block; - margin: auto; -} -img#visitor-badge { - display: block; - margin: auto; -} \ No newline at end of file diff --git a/spaces/safi842/FashionGen/netdissect/tool/lightbox.html b/spaces/safi842/FashionGen/netdissect/tool/lightbox.html deleted file mode 100644 index fb0ebdf64766a43c9353428853be77deb5c52665..0000000000000000000000000000000000000000 --- a/spaces/safi842/FashionGen/netdissect/tool/lightbox.html +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - - -
              -

              Images in {{ directory }}

              -
              -
              {{ r }}
              - -
              -
              - - - diff --git a/spaces/sanchit-gandhi/whisper-language-id/README.md b/spaces/sanchit-gandhi/whisper-language-id/README.md deleted file mode 100644 index 81e7a3db038ab39d51fc89a06884bc9e89743300..0000000000000000000000000000000000000000 --- a/spaces/sanchit-gandhi/whisper-language-id/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Whisper Language Id -emoji: 🌖 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sayakpaul/pokemon-sd-kerascv/share_btn.py b/spaces/sayakpaul/pokemon-sd-kerascv/share_btn.py deleted file mode 100644 index d15d7720f3aa8f282217e9fcc28097dc8010b1c2..0000000000000000000000000000000000000000 --- a/spaces/sayakpaul/pokemon-sd-kerascv/share_btn.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Credits: https://huggingface.co/spaces/stabilityai/stable-diffusion/blob/main/share_btn.py - -""" -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - const gradioEl = document.querySelector('body > gradio-app'); - const imgEls = gradioEl.querySelectorAll('#gallery img'); - const promptTxt = gradioEl.querySelector('#prompt-text-input input').value; - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!imgEls.length){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - const files = await Promise.all( - [...imgEls].map(async (imgEl) => { - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const fileName = `diffuse-the-rest-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - }) - ); - const urls = await Promise.all(files.map((f) => uploadFile(f))); - const htmlImgs = urls.map(url => ``); - const descriptionMd = `
              -${htmlImgs.join(`\n`)} -
              `; - const params = new URLSearchParams({ - title: promptTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/stabilityai/stable-diffusion/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" diff --git a/spaces/scedlatioru/img-to-music/Vs-Datey-Indirect-Tax-Book-Free-Download.md b/spaces/scedlatioru/img-to-music/Vs-Datey-Indirect-Tax-Book-Free-Download.md deleted file mode 100644 index 351e9866e26516af6e8d468c41729eac764b127e..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/Vs-Datey-Indirect-Tax-Book-Free-Download.md +++ /dev/null @@ -1,66 +0,0 @@ -## Vs Datey Indirect Tax Book Free Download - - - - - - - - - -**Download File ⭐ [https://urlca.com/2txvQ2](https://urlca.com/2txvQ2)** - - - - - - - - - - - - - -# VS Datey Indirect Tax Book: A Comprehensive Guide for Students and Professionals - - - -If you are looking for a book that covers all the aspects of indirect taxes in India, then VS Datey Indirect Tax Book is the one for you. This book is written by Mr. V.S. Datey, who has more than 27 years of experience in corporate field and has been writing books on indirect taxes and corporate laws since 1994[^1^]. His books are published by Taxmann, a leading publisher of tax and legal books in India. - - - -VS Datey Indirect Tax Book is a comprehensive guide that covers the latest provisions of GST, Customs, FTP and other indirect taxes. It explains the concepts, procedures, compliances and case laws in a simple and lucid manner. It also provides practical examples, illustrations, tables, charts and diagrams to help the readers understand the topics better. The book is updated with the latest amendments and notifications issued by the government. - - - -VS Datey Indirect Tax Book is suitable for students of CA, CS, CMA, LL.B., MBA and other professional courses. It is also useful for tax practitioners, consultants, lawyers, accountants and businessmen who deal with indirect taxes in their day-to-day work. The book is available in both print and digital formats. You can order it online from Taxmann's website or download it as a PDF file. - - - -VS Datey Indirect Tax Book is a must-have book for anyone who wants to learn or master the subject of indirect taxes in India. It is a reliable and authoritative source of information that will help you achieve your academic or professional goals. - - - -Some of the topics covered in VS Datey Indirect Tax Book are: - - - -- GST: Introduction, Levy and Collection, Supply, Registration, Tax Invoice, Returns, Payment, Refund, Input Tax Credit, Valuation, Reverse Charge, Composition Scheme, Exemptions, Zero Rated Supply, E-Way Bill, Audit and Assessment, Appeals and Revision, Advance Ruling, Offences and Penalties, etc. - -- Customs: Basic Concepts, Classification and Valuation of Goods, Import and Export Procedures, Duty Drawback, Baggage Rules, Warehousing, Anti-Dumping Duty, Safeguard Duty, Countervailing Duty, etc. - -- FTP: Foreign Trade Policy and Procedures, Export Promotion Schemes, Export Incentives, Duty Free Import Authorization Scheme, Advance Authorization Scheme, Export Oriented Units Scheme, Special Economic Zones Scheme, etc. 
- -- Other Indirect Taxes: Central Excise Duty, Service Tax (for pre-GST period), VAT (for pre-GST period), CST (for pre-GST period), Luxury Tax (for pre-GST period), Entertainment Tax (for pre-GST period), etc. - - - -The book also contains solved problems and multiple choice questions for practice and revision. It also provides references to relevant sections and rules of the respective laws for further study. The book is updated till the date of publication and incorporates all the changes made by the Finance Act 2022 and other notifications issued by the government. - - 1b8d091108 - - - - - diff --git a/spaces/scedlatioru/img-to-music/example/O Floare Si Doi Gradinari Film Indian Download [REPACK].md b/spaces/scedlatioru/img-to-music/example/O Floare Si Doi Gradinari Film Indian Download [REPACK].md deleted file mode 100644 index b0d3f5b2d9bc008c0c40bea08c14caefd270bc55..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/O Floare Si Doi Gradinari Film Indian Download [REPACK].md +++ /dev/null @@ -1,9 +0,0 @@ -
              -

              Footer Homepage
              desing wet cells 2
              jordan certified technician
              drrv 3000 drivers
              dm virus & spyware remover quick download
              one life no name v1.0
              Tempat Menyimpan Buku Lagian Kepada siapa Mar 2017 (12) June 2017 (17) September 2017 (22) Analnya Memiliki Ambil Pembelian Kursi Melamin
              vu lui 3gp, vidio, flv, mkv ipsfull
              Website Designing Tips And Tricks 2017 >s

              -

              o floare si doi gradinari film indian download


              DOWNLOAD ✺✺✺ https://gohhs.com/2uEzAS



              -

              best male ejaculating cum movies
              Free downloads for Firefox
              Free Download Mancode MediaPlayer 7.5 - Crack Datei Lchg - C00
              PCV to SD card convertor
              sukrat history in urdu pdf 11
              download jboss application server 7 hotfix for linux version
              how to make hard drive automatic repair
              forextradettrader's free free download
              buy html template online

              -

              BBC Dunya Hindi I Hindi www.bbc.co.in file hacking 2016 > Accelerate Online Meeting 2016 Hindi.rar
              Adobe Photoshop CS6 Activation Mac Osx DVD Windows Movie Download...
              phpmyadmin - Easy to use PHP web based MySQL Management System - Documentation

              -

              doraemon english sub Indonesia
              Big Boss HD - The Final Chapter (2015) - 720p
              Once Upon a Time in the West Full Movie 720p Bluray Subtitulado
              three quarters low - Das Model.html
              FMI - Auto Manufacturer and Supplier Manufacturer Network.doc
              MegaWin Driver Updater 2014.rar
              Remember Me Blood Pirate - Danza Agrarada Video HD Movil
              play online golden noyels free download
              Baby Girl Got Back 2 Full Movie HD 1080p Subtitles
              MeueKing (Czech) - Duration: 22:06.
              Sense8 (American) Full HD...

              -

              899543212b
              -
              -
              \ No newline at end of file diff --git a/spaces/schibsted/Facial_Recognition_with_Sentiment_Detector/darknet.py b/spaces/schibsted/Facial_Recognition_with_Sentiment_Detector/darknet.py deleted file mode 100644 index 6dc6918cd0d7b5940a2a21754abaeefd07e99fd4..0000000000000000000000000000000000000000 --- a/spaces/schibsted/Facial_Recognition_with_Sentiment_Detector/darknet.py +++ /dev/null @@ -1,322 +0,0 @@ -# PyTorch implementation of Darknet -# This is a custom, hard-coded version of darknet with -# YOLOv3 implementation for openimages database. This -# was written to test viability of implementing YOLO -# for face detection followed by emotion / sentiment -# analysis. -# -# Configuration, weights and data are hardcoded. -# Additional options include, ability to create -# subset of data with faces exracted for labelling. -# -# Author : Saikiran Tharimena -# Co-Authors: Kjetil Marinius Sjulsen, Juan Carlos Calvet Lopez -# Project : Emotion / Sentiment Detection from news images -# Date : 12 September 2022 -# Version : v0.1 -# -# (C) Schibsted ASA - -# Libraries -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable -import numpy as np -from utils import * - - -def parse_cfg(cfgfile): - """ - Takes a configuration file - - Returns a list of blocks. Each blocks describes a block in the neural - network to be built. Block is represented as a dictionary in the list - - """ - - file = open(cfgfile, 'r') - lines = file.read().split('\n') # store the lines in a list - lines = [x for x in lines if len(x) > 0] # get read of the empty lines - lines = [x for x in lines if x[0] != '#'] # get rid of comments - lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces - - block = {} - blocks = [] - - for line in lines: - if line[0] == "[": # This marks the start of a new block - if len(block) != 0: # If block is not empty, implies it is storing values of previous block. - blocks.append(block) # add it the blocks list - block = {} # re-init the block - block["type"] = line[1:-1].rstrip() - else: - key,value = line.split("=") - block[key.rstrip()] = value.lstrip() - blocks.append(block) - - return blocks - - -class EmptyLayer(nn.Module): - def __init__(self): - super(EmptyLayer, self).__init__() - - -class DetectionLayer(nn.Module): - def __init__(self, anchors): - super(DetectionLayer, self).__init__() - self.anchors = anchors - - -def create_modules(blocks): - net_info = blocks[0] #Captures the information about the input and pre-processing - module_list = nn.ModuleList() - prev_filters = 3 - output_filters = [] - - for index, x in enumerate(blocks[1:]): - module = nn.Sequential() - - #check the type of block - #create a new module for the block - #append to module_list - - #If it's a convolutional layer - if (x["type"] == "convolutional"): - #Get the info about the layer - activation = x["activation"] - try: - batch_normalize = int(x["batch_normalize"]) - bias = False - except: - batch_normalize = 0 - bias = True - - filters= int(x["filters"]) - padding = int(x["pad"]) - kernel_size = int(x["size"]) - stride = int(x["stride"]) - - if padding: - pad = (kernel_size - 1) // 2 - else: - pad = 0 - - #Add the convolutional layer - conv = nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias = bias) - module.add_module("conv_{0}".format(index), conv) - - #Add the Batch Norm Layer - if batch_normalize: - bn = nn.BatchNorm2d(filters) - module.add_module("batch_norm_{0}".format(index), bn) - - #Check the activation. 
- #It is either Linear or a Leaky ReLU for YOLO - if activation == "leaky": - activn = nn.LeakyReLU(0.1, inplace = True) - module.add_module("leaky_{0}".format(index), activn) - - #If it's an upsampling layer - #We use Bilinear2dUpsampling - elif (x["type"] == "upsample"): - stride = int(x["stride"]) - upsample = nn.Upsample(scale_factor = 2, mode = "nearest") - module.add_module("upsample_{}".format(index), upsample) - - #If it is a route layer - elif (x["type"] == "route"): - x["layers"] = x["layers"].split(',') - #Start of a route - start = int(x["layers"][0]) - #end, if there exists one. - try: - end = int(x["layers"][1]) - except: - end = 0 - #Positive anotation - if start > 0: - start = start - index - if end > 0: - end = end - index - route = EmptyLayer() - module.add_module("route_{0}".format(index), route) - if end < 0: - filters = output_filters[index + start] + output_filters[index + end] - else: - filters= output_filters[index + start] - - #shortcut corresponds to skip connection - elif x["type"] == "shortcut": - shortcut = EmptyLayer() - module.add_module("shortcut_{}".format(index), shortcut) - - #Yolo is the detection layer - elif x["type"] == "yolo": - mask = x["mask"].split(",") - mask = [int(x) for x in mask] - - anchors = x["anchors"].split(",") - anchors = [int(a) for a in anchors] - anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors),2)] - anchors = [anchors[i] for i in mask] - - detection = DetectionLayer(anchors) - module.add_module("Detection_{}".format(index), detection) - - module_list.append(module) - prev_filters = filters - output_filters.append(filters) - - return (net_info, module_list) - -class Darknet(nn.Module): - def __init__(self, cfgfile): - super(Darknet, self).__init__() - self.blocks = parse_cfg(cfgfile) - self.net_info, self.module_list = create_modules(self.blocks) - - def forward(self, x, CUDA): - modules = self.blocks[1:] - outputs = {} #We cache the outputs for the route layer - - write = 0 - for i, module in enumerate(modules): - module_type = (module["type"]) - - if module_type == "convolutional" or module_type == "upsample": - x = self.module_list[i](x) - - elif module_type == "route": - layers = module["layers"] - layers = [int(a) for a in layers] - - if (layers[0]) > 0: - layers[0] = layers[0] - i - - if len(layers) == 1: - x = outputs[i + (layers[0])] - - else: - if (layers[1]) > 0: - layers[1] = layers[1] - i - - map1 = outputs[i + layers[0]] - map2 = outputs[i + layers[1]] - x = torch.cat((map1, map2), 1) - - - elif module_type == "shortcut": - from_ = int(module["from"]) - x = outputs[i-1] + outputs[i+from_] - - elif module_type == 'yolo': - anchors = self.module_list[i][0].anchors - #Get the input dimensions - inp_dim = int (self.net_info["height"]) - - #Get the number of classes - num_classes = int (module["classes"]) - - #Transform - x = x.data - x = predict_transform(x, inp_dim, anchors, num_classes, CUDA) - if not write: #if no collector has been intialised. - detections = x - write = 1 - - else: - detections = torch.cat((detections, x), 1) - - outputs[i] = x - - return detections - - - def load_weights(self, weightfile): - #Open the weights file - fp = open(weightfile, "rb") - - #The first 5 values are header information - # 1. Major version number - # 2. Minor Version Number - # 3. Subversion number - # 4,5. 
Images seen by the network (during training) - header = np.fromfile(fp, dtype = np.int32, count = 5) - self.header = torch.from_numpy(header) - self.seen = self.header[3] - - weights = np.fromfile(fp, dtype = np.float32) - - ptr = 0 - for i in range(len(self.module_list)): - module_type = self.blocks[i + 1]["type"] - - #If module_type is convolutional load weights - #Otherwise ignore. - - if module_type == "convolutional": - model = self.module_list[i] - try: - batch_normalize = int(self.blocks[i+1]["batch_normalize"]) - except: - batch_normalize = 0 - - conv = model[0] - - - if (batch_normalize): - bn = model[1] - - #Get the number of weights of Batch Norm Layer - num_bn_biases = bn.bias.numel() - - #Load the weights - bn_biases = torch.from_numpy(weights[ptr:ptr + num_bn_biases]) - ptr += num_bn_biases - - bn_weights = torch.from_numpy(weights[ptr: ptr + num_bn_biases]) - ptr += num_bn_biases - - bn_running_mean = torch.from_numpy(weights[ptr: ptr + num_bn_biases]) - ptr += num_bn_biases - - bn_running_var = torch.from_numpy(weights[ptr: ptr + num_bn_biases]) - ptr += num_bn_biases - - #Cast the loaded weights into dims of model weights. - bn_biases = bn_biases.view_as(bn.bias.data) - bn_weights = bn_weights.view_as(bn.weight.data) - bn_running_mean = bn_running_mean.view_as(bn.running_mean) - bn_running_var = bn_running_var.view_as(bn.running_var) - - #Copy the data to model - bn.bias.data.copy_(bn_biases) - bn.weight.data.copy_(bn_weights) - bn.running_mean.copy_(bn_running_mean) - bn.running_var.copy_(bn_running_var) - - else: - #Number of biases - num_biases = conv.bias.numel() - - #Load the weights - conv_biases = torch.from_numpy(weights[ptr: ptr + num_biases]) - ptr = ptr + num_biases - - #reshape the loaded weights according to the dims of the model weights - conv_biases = conv_biases.view_as(conv.bias.data) - - #Finally copy the data - conv.bias.data.copy_(conv_biases) - - #Let us load the weights for the Convolutional layers - num_weights = conv.weight.numel() - - #Do the same as above for weights - conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights]) - ptr = ptr + num_weights - - conv_weights = conv_weights.view_as(conv.weight.data) - conv.weight.data.copy_(conv_weights) \ No newline at end of file diff --git a/spaces/sdhsdhk/bingosjj/src/components/learn-more.tsx b/spaces/sdhsdhk/bingosjj/src/components/learn-more.tsx deleted file mode 100644 index a64459ee7900a612292e117a6bda96ee9260990f..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingosjj/src/components/learn-more.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import React from 'react' -import { SourceAttribution } from '@/lib/bots/bing/types' - -export interface LearnMoreProps { - sourceAttributions?: SourceAttribution[] -} - -export function LearnMore({ sourceAttributions }: LearnMoreProps) { - if (!sourceAttributions?.length) { - return null - } - - return ( -
              -
              了解详细信息:
              -
              -
              - {sourceAttributions.map((attribution, index) => { - const { providerDisplayName, seeMoreUrl } = attribution - const { host } = new URL(seeMoreUrl) - return ( - - {index + 1}. {host} - - ) - })} -
              -
              -
              - ) -} diff --git a/spaces/seanghay/KLEA/models.py b/spaces/seanghay/KLEA/models.py deleted file mode 100644 index f5acdeb2bedd47897348407c0ae55c9a160da881..0000000000000000000000000000000000000000 --- a/spaces/seanghay/KLEA/models.py +++ /dev/null @@ -1,534 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for 
flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = 
in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def 
forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = 
PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/segments-tobias/conex/espnet/nets/tts_interface.py b/spaces/segments-tobias/conex/espnet/nets/tts_interface.py deleted file mode 100644 index 587d72792373ea4e9143a6443bac1f156d00fb90..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/tts_interface.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2018 Nagoya University (Tomoki Hayashi) -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""TTS Interface realted modules.""" - -from espnet.asr.asr_utils import torch_load - - -try: - import chainer -except ImportError: - Reporter = None -else: - - class Reporter(chainer.Chain): - """Reporter module.""" - - def report(self, dicts): - """Report values from a given dict.""" - for d in dicts: - chainer.reporter.report(d, self) - - -class TTSInterface(object): - """TTS Interface for ESPnet model implementation.""" - - @staticmethod - def add_arguments(parser): - """Add model specific argments to parser.""" - return parser - - def __init__(self): - """Initilize TTS module.""" - self.reporter = Reporter() - - def forward(self, *args, **kwargs): - """Calculate TTS forward propagation. - - Returns: - Tensor: Loss value. - - """ - raise NotImplementedError("forward method is not implemented") - - def inference(self, *args, **kwargs): - """Generate the sequence of features given the sequences of characters. - - Returns: - Tensor: The sequence of generated features (L, odim). - Tensor: The sequence of stop probabilities (L,). - Tensor: The sequence of attention weights (L, T). - - """ - raise NotImplementedError("inference method is not implemented") - - def calculate_all_attentions(self, *args, **kwargs): - """Calculate TTS attention weights. - - Args: - Tensor: Batch of attention weights (B, Lmax, Tmax). - - """ - raise NotImplementedError("calculate_all_attentions method is not implemented") - - def load_pretrained_model(self, model_path): - """Load pretrained model parameters.""" - torch_load(model_path, self) - - @property - def attention_plot_class(self): - """Plot attention weights.""" - from espnet.asr.asr_utils import PlotAttentionReport - - return PlotAttentionReport - - @property - def base_plot_keys(self): - """Return base key names to plot during training. - - The keys should match what `chainer.reporter` reports. - if you add the key `loss`, - the reporter will report `main/loss` and `validation/main/loss` values. - also `loss.png` will be created as a figure visulizing `main/loss` - and `validation/main/loss` values. - - Returns: - list[str]: Base keys to plot during training. 
- - """ - return ["loss"] diff --git a/spaces/segments-tobias/conex/espnet2/lm/espnet_model.py b/spaces/segments-tobias/conex/espnet2/lm/espnet_model.py deleted file mode 100644 index db6b0f7d62dff606c9095a2379dc36793f48133e..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/lm/espnet_model.py +++ /dev/null @@ -1,68 +0,0 @@ -from typing import Dict -from typing import Tuple - -import torch -import torch.nn.functional as F -from typeguard import check_argument_types - -from espnet.nets.pytorch_backend.nets_utils import make_pad_mask -from espnet2.lm.abs_model import AbsLM -from espnet2.torch_utils.device_funcs import force_gatherable -from espnet2.train.abs_espnet_model import AbsESPnetModel - - -class ESPnetLanguageModel(AbsESPnetModel): - def __init__(self, lm: AbsLM, vocab_size: int, ignore_id: int = 0): - assert check_argument_types() - super().__init__() - self.lm = lm - self.sos = vocab_size - 1 - self.eos = vocab_size - 1 - - # ignore_id may be assumed as 0, shared with CTC-blank symbol for ASR. - self.ignore_id = ignore_id - - def nll( - self, text: torch.Tensor, text_lengths: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - batch_size = text.size(0) - # For data parallel - text = text[:, : text_lengths.max()] - - # 1. Create a sentence pair like ' w1 w2 w3' and 'w1 w2 w3 ' - # text: (Batch, Length) -> x, y: (Batch, Length + 1) - x = F.pad(text, [1, 0], "constant", self.eos) - t = F.pad(text, [0, 1], "constant", self.ignore_id) - for i, l in enumerate(text_lengths): - t[i, l] = self.sos - x_lengths = text_lengths + 1 - - # 2. Forward Language model - # x: (Batch, Length) -> y: (Batch, Length, NVocab) - y, _ = self.lm(x, None) - - # 3. Calc negative log likelihood - # nll: (BxL,) - nll = F.cross_entropy(y.view(-1, y.shape[-1]), t.view(-1), reduction="none") - # nll: (BxL,) -> (BxL,) - nll.masked_fill_(make_pad_mask(x_lengths).to(nll.device).view(-1), 0.0) - # nll: (BxL,) -> (B, L) - nll = nll.view(batch_size, -1) - return nll, x_lengths - - def forward( - self, text: torch.Tensor, text_lengths: torch.Tensor - ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]: - nll, y_lengths = self.nll(text, text_lengths) - ntokens = y_lengths.sum() - loss = nll.sum() / ntokens - stats = dict(loss=loss.detach()) - - # force_gatherable: to-device and to-tensor if scalar for DataParallel - loss, stats, weight = force_gatherable((loss, stats, ntokens), loss.device) - return loss, stats, weight - - def collect_feats( - self, text: torch.Tensor, text_lengths: torch.Tensor - ) -> Dict[str, torch.Tensor]: - return {} diff --git a/spaces/sentencebird/audio-noise-reduction/app.py b/spaces/sentencebird/audio-noise-reduction/app.py deleted file mode 100644 index dbb3ebc3e282588d706487ecade628993cfab7e9..0000000000000000000000000000000000000000 --- a/spaces/sentencebird/audio-noise-reduction/app.py +++ /dev/null @@ -1,96 +0,0 @@ -import streamlit as st -import streamlit.components.v1 as stc -import noisereduce as nr -import librosa -import soundfile as sf -import numpy as np -import plotly.graph_objects as go -import pickle - -from pyannote.audio.utils.signal import Binarize -import torch - -@st.cache -def speech_activity_detection_model(): - # sad = torch.hub.load('pyannote-audio', 'sad_ami', source='local', device='cpu', batch_size=128) - with open('speech_activity_detection_model.pkl', 'rb') as f: - sad = pickle.load(f) - return sad - -@st.cache -def trim_noise_part_from_speech(sad, fname, speech_wav, sr): - file_obj = {"uri": "filename", "audio": 
fname}
-    sad_scores = sad(file_obj)
-    binarize = Binarize(offset=0.52, onset=0.52, log_scale=True, min_duration_off=0.1, min_duration_on=0.1)
-    speech = binarize.apply(sad_scores, dimension=1)
-
-    noise_wav = np.zeros((speech_wav.shape[0], 0))
-    append_axis = 1 if speech_wav.ndim == 2 else 0
-    noise_ranges = []
-    noise_start = 0
-    for segmentation in speech.segmentation():
-        noise_end, next_noise_start = int(segmentation.start*sr), int(segmentation.end*sr)
-        noise_wav = np.append(noise_wav, speech_wav[:, noise_start:noise_end], axis=append_axis)
-        noise_ranges.append((noise_start/sr, noise_end/sr))
-        noise_start = next_noise_start
-    return noise_wav.T, noise_ranges
-
-@st.cache
-def trim_audio(data, rate, start_sec=None, end_sec=None):
-    start, end = int(start_sec * rate), int(end_sec * rate)
-    if data.ndim == 1: # mono
-        return data[start:end]
-    elif data.ndim == 2: # stereo
-        return data[:, start:end]
-
-title = 'Audio noise reduction'
-st.set_page_config(page_title=title, page_icon=":sound:")
-st.title(title)
-
-uploaded_file = st.file_uploader("Upload your audio file (.wav)")
-
-is_file_uploaded = uploaded_file is not None
-if not is_file_uploaded:
    uploaded_file = 'sample.wav'
-
-wav, sr = librosa.load(uploaded_file, sr=None)
-wav_seconds = int(len(wav)/sr)
-
-st.subheader('Original audio')
-st.audio(uploaded_file)
-
-st.subheader('Noise part')
-noise_part_detection_method = st.radio('Noise source detection', ['Manually', 'Automatically (using speech activity detections)'])
-if noise_part_detection_method == "Manually": # a single noise segment
-    default_ranges = (0.0, float(wav_seconds)) if is_file_uploaded else (73.0, float(wav_seconds))
-    noise_part_ranges = [st.slider("Select a part of the noise (sec)", 0.0, float(wav_seconds), default_ranges, step=0.1)]
-    noise_wav = trim_audio(wav, sr, noise_part_ranges[0][0], noise_part_ranges[0][1])
-
-elif noise_part_detection_method == "Automatically (using speech activity detections)": # possibly multiple noise segments
-    with st.spinner('Please wait while the speech activities are detected'):
-        sad = speech_activity_detection_model()
-        noise_wav, noise_part_ranges = trim_noise_part_from_speech(sad, uploaded_file, wav, sr)
-
-fig = go.Figure()
-x_wav = np.arange(len(wav)) / sr
-fig.add_trace(go.Scatter(y=wav[::1000]))
-for noise_part_range in noise_part_ranges:
-    fig.add_vrect(x0=int(noise_part_range[0]*sr/1000), x1=int(noise_part_range[1]*sr/1000), fillcolor="Red", opacity=0.2)
-fig.update_layout(width=700, margin=dict(l=0, r=0, t=0, b=0, pad=0))
-fig.update_yaxes(visible=False, ticklabelposition='inside', tickwidth=0)
-st.plotly_chart(fig, use_container_width=True)
-
-st.text('Noise audio')
-sf.write('noise_clip.wav', noise_wav, sr)
-noise_wav, sr = librosa.load('noise_clip.wav', sr=None)
-st.audio('noise_clip.wav')
-
-if st.button('Denoise the audio!'):
-    with st.spinner('Please wait for completion'):
-        nr_wav = nr.reduce_noise(audio_clip=wav, noise_clip=noise_wav, prop_decrease=1.0)
-
-    st.subheader('Denoised audio')
-    sf.write('nr_clip.wav', nr_wav, sr)
-    st.success('Done!')
-    st.text('Denoised audio')
-    st.audio('nr_clip.wav')
diff --git a/spaces/shibinashraf36/drugrecommendationsystem/README.md b/spaces/shibinashraf36/drugrecommendationsystem/README.md
deleted file mode 100644
index b2fba317547f62546914d56c9ba6a10187a8f0a8..0000000000000000000000000000000000000000
--- a/spaces/shibinashraf36/drugrecommendationsystem/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Drugrecommendationsystem
-emoji: 🔥
-colorFrom: yellow
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py -pinned: false -license: mit -duplicated_from: shibinashraf/drugrecommendationsystem ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/shibing624/ChatPDF/modules/utils.py b/spaces/shibing624/ChatPDF/modules/utils.py deleted file mode 100644 index 6f93d1f71d41837cda6b4cbe7c3c6bbc18fa2109..0000000000000000000000000000000000000000 --- a/spaces/shibing624/ChatPDF/modules/utils.py +++ /dev/null @@ -1,656 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type -import logging -import json -import os -import datetime -import hashlib -import csv -import requests -import re -import html -import sys -import subprocess - -import gradio as gr -from pypinyin import lazy_pinyin -import tiktoken -from markdown import markdown -from pygments import highlight -from pygments.lexers import get_lexer_by_name -from pygments.formatters import HtmlFormatter -import pandas as pd - -from modules.presets import * -from . import shared -from modules.config import retrieve_proxy, hide_history_when_not_logged_in - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - -def predict(current_model, *args): - iter = current_model.predict(*args) - for i in iter: - yield i - -def billing_info(current_model): - return current_model.billing_info() - -def set_key(current_model, *args): - return current_model.set_key(*args) - -def load_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def interrupt(current_model, *args): - return current_model.interrupt(*args) - -def reset(current_model, *args): - return current_model.reset(*args) - -def retry(current_model, *args): - iter = current_model.retry(*args) - for i in iter: - yield i - -def delete_first_conversation(current_model, *args): - return current_model.delete_first_conversation(*args) - -def delete_last_conversation(current_model, *args): - return current_model.delete_last_conversation(*args) - -def set_system_prompt(current_model, *args): - return current_model.set_system_prompt(*args) - -def save_chat_history(current_model, *args): - return current_model.save_chat_history(*args) - -def export_markdown(current_model, *args): - return current_model.export_markdown(*args) - -def load_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def upload_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def set_token_upper_limit(current_model, *args): - return current_model.set_token_upper_limit(*args) - -def set_temperature(current_model, *args): - current_model.set_temperature(*args) - -def set_top_p(current_model, *args): - current_model.set_top_p(*args) - -def set_n_choices(current_model, *args): - current_model.set_n_choices(*args) - -def set_stop_sequence(current_model, *args): - current_model.set_stop_sequence(*args) - -def set_max_tokens(current_model, *args): - current_model.set_max_tokens(*args) - -def set_presence_penalty(current_model, *args): - current_model.set_presence_penalty(*args) - -def set_frequency_penalty(current_model, *args): - current_model.set_frequency_penalty(*args) - -def set_logit_bias(current_model, *args): - current_model.set_logit_bias(*args) - -def set_user_identifier(current_model, *args): - current_model.set_user_identifier(*args) - -def set_single_turn(current_model, *args): - 
current_model.set_single_turn(*args) - -def handle_file_upload(current_model, *args): - return current_model.handle_file_upload(*args) - -def handle_summarize_index(current_model, *args): - return current_model.summarize_index(*args) - -def like(current_model, *args): - return current_model.like(*args) - -def dislike(current_model, *args): - return current_model.dislike(*args) - - -def count_token(message): - encoding = tiktoken.get_encoding("cl100k_base") - input_str = f"role: {message['role']}, content: {message['content']}" - length = len(encoding.encode(input_str)) - return length - - -def markdown_to_html_with_syntax_highlight(md_str): # deprecated - def replacer(match): - lang = match.group(1) or "text" - code = match.group(2) - - try: - lexer = get_lexer_by_name(lang, stripall=True) - except ValueError: - lexer = get_lexer_by_name("text", stripall=True) - - formatter = HtmlFormatter() - highlighted_code = highlight(code, lexer, formatter) - - return f'
              {highlighted_code}
              ' - - code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" - md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) - - html_str = markdown(md_str) - return html_str - - -def normalize_markdown(md_text: str) -> str: # deprecated - lines = md_text.split("\n") - normalized_lines = [] - inside_list = False - - for i, line in enumerate(lines): - if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): - if not inside_list and i > 0 and lines[i - 1].strip() != "": - normalized_lines.append("") - inside_list = True - normalized_lines.append(line) - elif inside_list and line.strip() == "": - if i < len(lines) - 1 and not re.match( - r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() - ): - normalized_lines.append(line) - continue - else: - inside_list = False - normalized_lines.append(line) - - return "\n".join(normalized_lines) - - -def convert_mdtext(md_text): # deprecated - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) - code_blocks = code_block_pattern.findall(md_text) - non_code_parts = code_block_pattern.split(md_text)[::2] - - result = [] - raw = f'
              {html.escape(md_text)}
              ' - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - non_code = normalize_markdown(non_code) - result.append(markdown(non_code, extensions=["tables"])) - if code.strip(): - # _, code = detect_language(code) # 暂时去除代码高亮功能,因为在大段代码的情况下会出现问题 - # code = code.replace("\n\n", "\n") # 暂时去除代码中的空行,因为在大段代码的情况下会出现问题 - code = f"\n```{code}\n\n```" - code = markdown_to_html_with_syntax_highlight(code) - result.append(code) - result = "".join(result) - output = f'
              {result}
              ' - output += raw - output += ALREADY_CONVERTED_MARK - return output - -def convert_bot_before_marked(chat_message): - """ - 注意不能给输出加缩进, 否则会被marked解析成代码块 - """ - if '
              ' in chat_message: - return chat_message - else: - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - code_blocks = code_block_pattern.findall(chat_message) - non_code_parts = code_block_pattern.split(chat_message)[::2] - result = [] - - raw = f'
              {escape_markdown(chat_message)}
              ' - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - result.append(non_code) - if code.strip(): - code = f"\n```{code}\n```" - result.append(code) - result = "".join(result) - md = f'
              {result}\n
              ' - return raw + md - -def convert_user_before_marked(chat_message): - if '
              ' in chat_message: - return chat_message - else: - return f'
              {escape_markdown(chat_message)}
              ' - -def escape_markdown(text): - """ - Escape Markdown special characters to HTML-safe equivalents. - """ - escape_chars = { - ' ': ' ', - '_': '_', - '*': '*', - '[': '[', - ']': ']', - '(': '(', - ')': ')', - '{': '{', - '}': '}', - '#': '#', - '+': '+', - '-': '-', - '.': '.', - '!': '!', - '`': '`', - '>': '>', - '<': '<', - '|': '|' - } - return ''.join(escape_chars.get(c, c) for c in text) - - -def convert_asis(userinput): # deprecated - return ( - f'

              {html.escape(userinput)}

              ' - + ALREADY_CONVERTED_MARK - ) - - -def detect_converted_mark(userinput): # deprecated - try: - if userinput.endswith(ALREADY_CONVERTED_MARK): - return True - else: - return False - except: - return True - - -def detect_language(code): # deprecated - if code.startswith("\n"): - first_line = "" - else: - first_line = code.strip().split("\n", 1)[0] - language = first_line.lower() if first_line else "" - code_without_language = code[len(first_line) :].lstrip() if first_line else code - return language, code_without_language - - -def construct_text(role, text): - return {"role": role, "content": text} - - -def construct_user(text): - return construct_text("user", text) - - -def construct_system(text): - return construct_text("system", text) - - -def construct_assistant(text): - return construct_text("assistant", text) - - -def save_file(filename, system, history, chatbot, user_name): - logging.debug(f"{user_name} 保存对话历史中……") - os.makedirs(os.path.join(HISTORY_DIR, user_name), exist_ok=True) - if filename.endswith(".json"): - json_s = {"system": system, "history": history, "chatbot": chatbot} - if "/" in filename or "\\" in filename: - history_file_path = filename - else: - history_file_path = os.path.join(HISTORY_DIR, user_name, filename) - with open(history_file_path, "w", encoding='utf-8') as f: - json.dump(json_s, f, ensure_ascii=False) - elif filename.endswith(".md"): - md_s = f"system: \n- {system} \n" - for data in history: - md_s += f"\n{data['role']}: \n- {data['content']} \n" - with open(os.path.join(HISTORY_DIR, user_name, filename), "w", encoding="utf8") as f: - f.write(md_s) - logging.debug(f"{user_name} 保存对话历史完毕") - return os.path.join(HISTORY_DIR, user_name, filename) - - -def sorted_by_pinyin(list): - return sorted(list, key=lambda char: lazy_pinyin(char)[0][0]) - - -def get_file_names(dir, plain=False, filetypes=[".json"]): - logging.debug(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}") - files = [] - try: - for type in filetypes: - files += [f for f in os.listdir(dir) if f.endswith(type)] - except FileNotFoundError: - files = [] - files = sorted_by_pinyin(files) - if files == []: - files = [""] - logging.debug(f"files are:{files}") - if plain: - return files - else: - return gr.Dropdown.update(choices=files) - - -def get_history_names(plain=False, user_name=""): - logging.debug(f"从用户 {user_name} 中获取历史记录文件名列表") - if user_name == "" and hide_history_when_not_logged_in: - return "" - else: - return get_file_names(os.path.join(HISTORY_DIR, user_name), plain) - - -def load_template(filename, mode=0): - logging.debug(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)") - lines = [] - if filename.endswith(".json"): - with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f: - lines = json.load(f) - lines = [[i["act"], i["prompt"]] for i in lines] - else: - with open( - os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8" - ) as csvfile: - reader = csv.reader(csvfile) - lines = list(reader) - lines = lines[1:] - if mode == 1: - return sorted_by_pinyin([row[0] for row in lines]) - elif mode == 2: - return {row[0]: row[1] for row in lines} - else: - choices = sorted_by_pinyin([row[0] for row in lines]) - return {row[0]: row[1] for row in lines}, gr.Dropdown.update( - choices=choices - ) - - -def get_template_names(plain=False): - logging.debug("获取模板文件名列表") - return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"]) - - -def get_template_content(templates, selection, original_system_prompt): - 
logging.debug(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}") - try: - return templates[selection] - except: - return original_system_prompt - - -def reset_textbox(): - logging.debug("重置文本框") - return gr.update(value="") - - -def reset_default(): - default_host = shared.state.reset_api_host() - retrieve_proxy("") - return gr.update(value=default_host), gr.update(value=""), "API-Host 和代理已重置" - - -def change_api_host(host): - shared.state.set_api_host(host) - msg = f"API-Host更改为了{host}" - logging.info(msg) - return msg - - -def change_proxy(proxy): - retrieve_proxy(proxy) - os.environ["HTTPS_PROXY"] = proxy - msg = f"代理更改为了{proxy}" - logging.info(msg) - return msg - - -def hide_middle_chars(s): - if s is None: - return "" - if len(s) <= 8: - return s - else: - head = s[:4] - tail = s[-4:] - hidden = "*" * (len(s) - 8) - return head + hidden + tail - - -def submit_key(key): - key = key.strip() - msg = f"API密钥更改为了{hide_middle_chars(key)}" - logging.info(msg) - return key, msg - - -def replace_today(prompt): - today = datetime.datetime.today().strftime("%Y-%m-%d") - return prompt.replace("{current_date}", today) - - -def get_geoip(): - try: - with retrieve_proxy(): - response = requests.get("https://ipapi.co/json/", timeout=5) - data = response.json() - except: - data = {"error": True, "reason": "连接ipapi失败"} - if "error" in data.keys(): - logging.warning(f"无法获取IP地址信息。\n{data}") - if data["reason"] == "RateLimited": - return ( - i18n("您的IP区域:未知。") - ) - else: - return i18n("获取IP地理位置失败。原因:") + f"{data['reason']}" + i18n("。你仍然可以使用聊天功能。") - else: - country = data["country_name"] - if country == "China": - text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**" - else: - text = i18n("您的IP区域:") + f"{country}。" - logging.info(text) - return text - - -def find_n(lst, max_num): - n = len(lst) - total = sum(lst) - - if total < max_num: - return n - - for i in range(len(lst)): - if total - lst[i] < max_num: - return n - i - 1 - total = total - lst[i] - return 1 - - -def start_outputing(): - logging.debug("显示取消按钮,隐藏发送按钮") - return gr.Button.update(visible=False), gr.Button.update(visible=True) - - -def end_outputing(): - return ( - gr.Button.update(visible=True), - gr.Button.update(visible=False), - ) - - -def cancel_outputing(): - logging.info("中止输出……") - shared.state.interrupt() - - -def transfer_input(inputs): - # 一次性返回,降低延迟 - textbox = reset_textbox() - outputing = start_outputing() - return ( - inputs, - gr.update(value=""), - gr.Button.update(visible=False), - gr.Button.update(visible=True), - ) - - - -def run(command, desc=None, errdesc=None, custom_env=None, live=False): - if desc is not None: - print(desc) - if live: - result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - raise RuntimeError(f"""{errdesc or 'Error running command'}. - Command: {command} - Error code: {result.returncode}""") - - return "" - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - message = f"""{errdesc or 'Error running command'}. 
- Command: {command} - Error code: {result.returncode} - stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else ''} - stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else ''} - """ - raise RuntimeError(message) - return result.stdout.decode(encoding="utf8", errors="ignore") - -def versions_html(): - git = os.environ.get('GIT', "git") - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - try: - commit_hash = run(f"{git} rev-parse HEAD").strip() - except Exception: - commit_hash = "" - if commit_hash != "": - short_commit = commit_hash[0:7] - commit_info = f"{short_commit}" - else: - commit_info = "unknown \U0001F615" - return f""" - Python: {python_version} -  •  - Gradio: {gr.__version__} -  •  - ChuanhuChat: {commit_info} - """ - -def get_html(filename): - path = os.path.join(shared.chuanhu_path, "assets", "html", filename) - if os.path.exists(path): - with open(path, encoding="utf8") as file: - return file.read() - return "" - -def add_source_numbers(lst, source_name = "Source", use_source = True): - if use_source: - return [f'[{idx+1}]\t "{item[0]}"\n{source_name}: {item[1]}' for idx, item in enumerate(lst)] - else: - return [f'[{idx+1}]\t "{item}"' for idx, item in enumerate(lst)] - -def add_details(lst): - nodes = [] - for index, txt in enumerate(lst): - brief = txt[:25].replace("\n", "") - nodes.append( - f"
              {brief}...

              {txt}

              " - ) - return nodes - - -def sheet_to_string(sheet, sheet_name = None): - result = [] - for index, row in sheet.iterrows(): - row_string = "" - for column in sheet.columns: - row_string += f"{column}: {row[column]}, " - row_string = row_string.rstrip(", ") - row_string += "." - result.append(row_string) - return result - -def excel_to_string(file_path): - # 读取Excel文件中的所有工作表 - excel_file = pd.read_excel(file_path, engine='openpyxl', sheet_name=None) - - # 初始化结果字符串 - result = [] - - # 遍历每一个工作表 - for sheet_name, sheet_data in excel_file.items(): - - # 处理当前工作表并添加到结果字符串 - result += sheet_to_string(sheet_data, sheet_name=sheet_name) - - - return result - -def get_last_day_of_month(any_day): - # The day 28 exists in every month. 4 days later, it's always next month - next_month = any_day.replace(day=28) + datetime.timedelta(days=4) - # subtracting the number of the current day brings us back one month - return next_month - datetime.timedelta(days=next_month.day) - -def get_model_source(model_name, alternative_source): - if model_name == "gpt2-medium": - return "https://huggingface.co/gpt2-medium" - -def refresh_ui_elements_on_load(current_model, selected_model_name, user_name): - current_model.set_user_identifier(user_name) - return toggle_like_btn_visibility(selected_model_name), *current_model.auto_load() - -def toggle_like_btn_visibility(selected_model_name): - if selected_model_name == "xmchat": - return gr.update(visible=True) - else: - return gr.update(visible=False) - -def new_auto_history_filename(dirname): - latest_file = get_latest_filepath(dirname) - if latest_file: - with open(os.path.join(dirname, latest_file), 'r', encoding="utf-8") as f: - if len(f.read()) == 0: - return latest_file - now = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') - return f'{now}.json' - -def get_latest_filepath(dirname): - pattern = re.compile(r'\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') - latest_time = None - latest_file = None - for filename in os.listdir(dirname): - if os.path.isfile(os.path.join(dirname, filename)): - match = pattern.search(filename) - if match and match.group(0) == filename[:19]: - time_str = filename[:19] - filetime = datetime.datetime.strptime(time_str, '%Y-%m-%d_%H-%M-%S') - if not latest_time or filetime > latest_time: - latest_time = filetime - latest_file = filename - return latest_file - -def get_history_filepath(username): - dirname = os.path.join(HISTORY_DIR, username) - os.makedirs(dirname, exist_ok=True) - latest_file = get_latest_filepath(dirname) - if not latest_file: - latest_file = new_auto_history_filename(dirname) - - latest_file = os.path.join(dirname, latest_file) - return latest_file diff --git a/spaces/sidharthism/fashion-eye/netdissect/tool/allunitsample.py b/spaces/sidharthism/fashion-eye/netdissect/tool/allunitsample.py deleted file mode 100644 index 9f86e196ce63ebfcad1fcee8bd2b7358463ff3d1..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/netdissect/tool/allunitsample.py +++ /dev/null @@ -1,199 +0,0 @@ -''' -A simple tool to generate sample of output of a GAN, -subject to filtering, sorting, or intervention. 
-''' - -import torch, numpy, os, argparse, sys, shutil, errno, numbers -from PIL import Image -from torch.utils.data import TensorDataset -from netdissect.zdataset import standard_z_sample -from netdissect.progress import default_progress, verbose_progress -from netdissect.autoeval import autoimport_eval -from netdissect.workerpool import WorkerBase, WorkerPool -from netdissect.nethook import retain_layers -from netdissect.runningstats import RunningTopK - -def main(): - parser = argparse.ArgumentParser(description='GAN sample making utility') - parser.add_argument('--model', type=str, default=None, - help='constructor for the model to test') - parser.add_argument('--pthfile', type=str, default=None, - help='filename of .pth file for the model') - parser.add_argument('--outdir', type=str, default='images', - help='directory for image output') - parser.add_argument('--size', type=int, default=100, - help='number of images to output') - parser.add_argument('--test_size', type=int, default=None, - help='number of images to test') - parser.add_argument('--layer', type=str, default=None, - help='layer to inspect') - parser.add_argument('--seed', type=int, default=1, - help='seed') - parser.add_argument('--quiet', action='store_true', default=False, - help='silences console output') - if len(sys.argv) == 1: - parser.print_usage(sys.stderr) - sys.exit(1) - args = parser.parse_args() - verbose_progress(not args.quiet) - - # Instantiate the model - model = autoimport_eval(args.model) - if args.pthfile is not None: - data = torch.load(args.pthfile) - if 'state_dict' in data: - meta = {} - for key in data: - if isinstance(data[key], numbers.Number): - meta[key] = data[key] - data = data['state_dict'] - model.load_state_dict(data) - # Unwrap any DataParallel-wrapped model - if isinstance(model, torch.nn.DataParallel): - model = next(model.children()) - # Examine first conv in model to determine input feature size. - first_layer = [c for c in model.modules() - if isinstance(c, (torch.nn.Conv2d, torch.nn.ConvTranspose2d, - torch.nn.Linear))][0] - # 4d input if convolutional, 2d input if first layer is linear. 
- if isinstance(first_layer, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)): - z_channels = first_layer.in_channels - spatialdims = (1, 1) - else: - z_channels = first_layer.in_features - spatialdims = () - # Instrument the model - retain_layers(model, [args.layer]) - model.cuda() - - if args.test_size is None: - args.test_size = args.size * 20 - z_universe = standard_z_sample(args.test_size, z_channels, - seed=args.seed) - z_universe = z_universe.view(tuple(z_universe.shape) + spatialdims) - indexes = get_all_highest_znums( - model, z_universe, args.size, seed=args.seed) - save_chosen_unit_images(args.outdir, model, z_universe, indexes, - lightbox=True) - - -def get_all_highest_znums(model, z_universe, size, - batch_size=10, seed=1): - # The model should have been instrumented already - retained_items = list(model.retained.items()) - assert len(retained_items) == 1 - layer = retained_items[0][0] - # By default, a 10% sample - progress = default_progress() - num_units = None - with torch.no_grad(): - # Pass 1: collect max activation stats - z_loader = torch.utils.data.DataLoader(TensorDataset(z_universe), - batch_size=batch_size, num_workers=2, - pin_memory=True) - rtk = RunningTopK(k=size) - for [z] in progress(z_loader, desc='Finding max activations'): - z = z.cuda() - model(z) - feature = model.retained[layer] - num_units = feature.shape[1] - max_feature = feature.view( - feature.shape[0], num_units, -1).max(2)[0] - rtk.add(max_feature) - td, ti = rtk.result() - highest = ti.sort(1)[0] - return highest - -def save_chosen_unit_images(dirname, model, z_universe, indices, - shared_dir="shared_images", - unitdir_template="unit_{}", - name_template="image_{}.jpg", - lightbox=False, batch_size=50, seed=1): - all_indices = torch.unique(indices.view(-1), sorted=True) - z_sample = z_universe[all_indices] - progress = default_progress() - sdir = os.path.join(dirname, shared_dir) - created_hashdirs = set() - for index in range(len(z_universe)): - hd = hashdir(index) - if hd not in created_hashdirs: - created_hashdirs.add(hd) - os.makedirs(os.path.join(sdir, hd), exist_ok=True) - with torch.no_grad(): - # Pass 2: now generate images - z_loader = torch.utils.data.DataLoader(TensorDataset(z_sample), - batch_size=batch_size, num_workers=2, - pin_memory=True) - saver = WorkerPool(SaveImageWorker) - for batch_num, [z] in enumerate(progress(z_loader, - desc='Saving images')): - z = z.cuda() - start_index = batch_num * batch_size - im = ((model(z) + 1) / 2 * 255).clamp(0, 255).byte().permute( - 0, 2, 3, 1).cpu() - for i in range(len(im)): - index = all_indices[i + start_index].item() - filename = os.path.join(sdir, hashdir(index), - name_template.format(index)) - saver.add(im[i].numpy(), filename) - saver.join() - linker = WorkerPool(MakeLinkWorker) - for u in progress(range(len(indices)), desc='Making links'): - udir = os.path.join(dirname, unitdir_template.format(u)) - os.makedirs(udir, exist_ok=True) - for r in range(indices.shape[1]): - index = indices[u,r].item() - fn = name_template.format(index) - # sourcename = os.path.join('..', shared_dir, fn) - sourcename = os.path.join(sdir, hashdir(index), fn) - targname = os.path.join(udir, fn) - linker.add(sourcename, targname) - if lightbox: - copy_lightbox_to(udir) - linker.join() - -def copy_lightbox_to(dirname): - srcdir = os.path.realpath( - os.path.join(os.getcwd(), os.path.dirname(__file__))) - shutil.copy(os.path.join(srcdir, 'lightbox.html'), - os.path.join(dirname, '+lightbox.html')) - -def hashdir(index): - # To keep the number of files the shared 
directory lower, split it - # into 100 subdirectories named as follows. - return '%02d' % (index % 100) - -class SaveImageWorker(WorkerBase): - # Saving images can be sped up by sending jpeg encoding and - # file-writing work to a pool. - def work(self, data, filename): - Image.fromarray(data).save(filename, optimize=True, quality=100) - -class MakeLinkWorker(WorkerBase): - # Creating symbolic links is a bit slow and can be done faster - # in parallel rather than waiting for each to be created. - def work(self, sourcename, targname): - try: - os.link(sourcename, targname) - except OSError as e: - if e.errno == errno.EEXIST: - os.remove(targname) - os.link(sourcename, targname) - else: - raise - -class MakeSyminkWorker(WorkerBase): - # Creating symbolic links is a bit slow and can be done faster - # in parallel rather than waiting for each to be created. - def work(self, sourcename, targname): - try: - os.symlink(sourcename, targname) - except OSError as e: - if e.errno == errno.EEXIST: - os.remove(targname) - os.symlink(sourcename, targname) - else: - raise - -if __name__ == '__main__': - main() diff --git a/spaces/sino72/Passenger_Reconization/deep_sort/deep_sort/deep/test.py b/spaces/sino72/Passenger_Reconization/deep_sort/deep_sort/deep/test.py deleted file mode 100644 index ebd590336f7b17c44738c4c15458f02f33f08017..0000000000000000000000000000000000000000 --- a/spaces/sino72/Passenger_Reconization/deep_sort/deep_sort/deep/test.py +++ /dev/null @@ -1,77 +0,0 @@ -import torch -import torch.backends.cudnn as cudnn -import torchvision - -import argparse -import os - -from model import Net - -parser = argparse.ArgumentParser(description="Train on market1501") -parser.add_argument("--data-dir",default='data',type=str) -parser.add_argument("--no-cuda",action="store_true") -parser.add_argument("--gpu-id",default=0,type=int) -args = parser.parse_args() - -# device -device = "cuda:{}".format(args.gpu_id) if torch.cuda.is_available() and not args.no_cuda else "cpu" -if torch.cuda.is_available() and not args.no_cuda: - cudnn.benchmark = True - -# data loader -root = args.data_dir -query_dir = os.path.join(root,"query") -gallery_dir = os.path.join(root,"gallery") -transform = torchvision.transforms.Compose([ - torchvision.transforms.Resize((128,64)), - torchvision.transforms.ToTensor(), - torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) -]) -queryloader = torch.utils.data.DataLoader( - torchvision.datasets.ImageFolder(query_dir, transform=transform), - batch_size=64, shuffle=False -) -galleryloader = torch.utils.data.DataLoader( - torchvision.datasets.ImageFolder(gallery_dir, transform=transform), - batch_size=64, shuffle=False -) - -# net definition -net = Net(reid=True) -assert os.path.isfile("./checkpoint/ckpt.t7"), "Error: no checkpoint file found!" 
-print('Loading from checkpoint/ckpt.t7') -checkpoint = torch.load("./checkpoint/ckpt.t7") -net_dict = checkpoint['net_dict'] -net.load_state_dict(net_dict, strict=False) -net.eval() -net.to(device) - -# compute features -query_features = torch.tensor([]).float() -query_labels = torch.tensor([]).long() -gallery_features = torch.tensor([]).float() -gallery_labels = torch.tensor([]).long() - -with torch.no_grad(): - for idx,(inputs,labels) in enumerate(queryloader): - inputs = inputs.to(device) - features = net(inputs).cpu() - query_features = torch.cat((query_features, features), dim=0) - query_labels = torch.cat((query_labels, labels)) - - for idx,(inputs,labels) in enumerate(galleryloader): - inputs = inputs.to(device) - features = net(inputs).cpu() - gallery_features = torch.cat((gallery_features, features), dim=0) - gallery_labels = torch.cat((gallery_labels, labels)) - -gallery_labels -= 2 - -# save features -features = { - "qf": query_features, - "ql": query_labels, - "gf": gallery_features, - "gl": gallery_labels -} -torch.save(features,"features.pth") \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/examples/qa_t5/run_finetune.sh b/spaces/skf15963/summary/fengshen/examples/qa_t5/run_finetune.sh deleted file mode 100644 index 4e8e1f4b0fe07a8d2807e44d55a1f22cb2ef6439..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/qa_t5/run_finetune.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=finetune-cmrc -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=1 -#SBATCH --gres=gpu:1 # number of gpus -#SBATCH --cpus-per-task=4 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH -o $YOUR_PROJECT_DIR/%x-%j.log -#SBATCH -e $YOUR_PROJECT_DIR/%x-%j.err - -set -x -e - -echo "START TIME: $(date)" -MICRO_BATCH_SIZE=8 - -ROOT_DIR=$YOUR_PROJECT_DIR -DOWNLOAD_MODEL_PATH=$YOUR_PROJECT_DIR/Randeng-T5-784M-QA-Chinese/ - - -if [ ! -d ${ROOT_DIR} ];then - mkdir ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! 
-fi - -ZERO_STAGE=1 - -config_json="$ROOT_DIR/ds_config.randeng_t5_dialog_784M.$SLURM_JOBID.json" -export MASTER_PORT=$[RANDOM%10000+30000] - -cat < $config_json -{ - "train_micro_batch_size_per_gpu": ${MICRO_BATCH_SIZE}, - "steps_per_print": 100, - "gradient_clipping": 1.0, - "zero_optimization": { - "stage": $ZERO_STAGE, - "contiguous_gradients": false, - "overlap_comm": true, - "reduce_scatter": true, - "reduce_bucket_size": 50000000, - "allgather_bucket_size": 500000000 - }, -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json -export TORCH_EXTENSIONS_DIR=$YOUR_HOME/tmp/torch_extendsions -# strategy=ddp -strategy=deepspeed_stage_1 - -TRAINER_ARGS=" - --max_epochs 10 \ - --gpus 1 \ - --num_nodes 1 \ - --strategy ${strategy} \ - --default_root_dir $ROOT_DIR \ - --save_ckpt_path $ROOT_DIR/ckpt \ - --save_top_k 5 \ - --every_n_train_steps 100\ - --monitor val_rougeL_fmeasure \ - --mode max \ - --save_last \ - --check_val_every_n_epoch 1 \ - --num_workers 4 \ - --dataloader_workers 4 \ - --replace_sampler_ddp False \ - --accumulate_grad_batches 2 \ - --formator t5style \ - --filename model-{epoch:02d}-{val_loss:.4f}-{val_rougeL_fmeasure:.3f} \ - --precision 16 \ -" - -TRAIN_DATA_PATH=$YOUR_TRAIN_FILE -DEV_DATA_PATH=$YOUR_DEV_FILE - -DATA_ARGS=" - --train_batchsize $MICRO_BATCH_SIZE \ - --val_batchsize $MICRO_BATCH_SIZE \ - --train_file $TRAIN_DATA_PATH \ - --val_file $DEV_DATA_PATH \ - --max_seq_length 512 \ - --max_knowledge_length 425 \ - --max_target_length 128 -" - -MODEL_ARGS=" - --pretrained_model_path $DOWNLOAD_MODEL_PATH \ - --tokenizer_type t5_tokenizer \ - --learning_rate 1e-4 \ - --weight_decay 1e-2 \ - --warmup_ratio 0.1 \ - --sheduler_type polynomial \ - --min_learning_rate 1e-5 \ -" - -SCRIPTS_PATH=$YOUR_PROJECT_DIR/Fengshenbang-LM/fengshen/examples/qa_t5/finetune_t5_cmrc.py - -export CMD=" \ - $SCRIPTS_PATH \ - $TRAINER_ARGS \ - $MODEL_ARGS \ - $DATA_ARGS \ - " - -echo $CMD -# conda activate fs -# export CUDA_VISIBLE_DEVICES=5 -srun python $CMD diff --git a/spaces/skytnt/moe-tts/README.md b/spaces/skytnt/moe-tts/README.md deleted file mode 100644 index 54e777a6e6be88cc07986cd129e8dbf8ab382942..0000000000000000000000000000000000000000 --- a/spaces/skytnt/moe-tts/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Moe TTS -emoji: 😊🎙️ -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/spark-nlp/SparkNLP_NER/SessionState.py b/spaces/spark-nlp/SparkNLP_NER/SessionState.py deleted file mode 100644 index 48217b0f160349a91b9b3a0b50238a8dc851bb06..0000000000000000000000000000000000000000 --- a/spaces/spark-nlp/SparkNLP_NER/SessionState.py +++ /dev/null @@ -1,117 +0,0 @@ -"""Hack to add per-session state to Streamlit. 
- -Usage ------ - ->>> import SessionState ->>> ->>> session_state = SessionState.get(user_name='', favorite_color='black') ->>> session_state.user_name -'' ->>> session_state.user_name = 'Mary' ->>> session_state.favorite_color -'black' - -Since you set user_name above, next time your script runs this will be the -result: ->>> session_state = get(user_name='', favorite_color='black') ->>> session_state.user_name -'Mary' - -""" -try: - import streamlit.ReportThread as ReportThread - from streamlit.server.Server import Server -except Exception: - # Streamlit >= 0.65.0 - import streamlit.report_thread as ReportThread - from streamlit.server.server import Server - - -class SessionState(object): - def __init__(self, **kwargs): - """A new SessionState object. - - Parameters - ---------- - **kwargs : any - Default values for the session state. - - Example - ------- - >>> session_state = SessionState(user_name='', favorite_color='black') - >>> session_state.user_name = 'Mary' - '' - >>> session_state.favorite_color - 'black' - - """ - for key, val in kwargs.items(): - setattr(self, key, val) - - -def get(**kwargs): - """Gets a SessionState object for the current session. - - Creates a new object if necessary. - - Parameters - ---------- - **kwargs : any - Default values you want to add to the session state, if we're creating a - new one. - - Example - ------- - >>> session_state = get(user_name='', favorite_color='black') - >>> session_state.user_name - '' - >>> session_state.user_name = 'Mary' - >>> session_state.favorite_color - 'black' - - Since you set user_name above, next time your script runs this will be the - result: - >>> session_state = get(user_name='', favorite_color='black') - >>> session_state.user_name - 'Mary' - - """ - # Hack to get the session object from Streamlit. - - ctx = ReportThread.get_report_ctx() - - this_session = None - - current_server = Server.get_current() - if hasattr(current_server, '_session_infos'): - # Streamlit < 0.56 - session_infos = Server.get_current()._session_infos.values() - else: - session_infos = Server.get_current()._session_info_by_id.values() - - for session_info in session_infos: - s = session_info.session - if ( - # Streamlit < 0.54.0 - (hasattr(s, '_main_dg') and s._main_dg == ctx.main_dg) - or - # Streamlit >= 0.54.0 - (not hasattr(s, '_main_dg') and s.enqueue == ctx.enqueue) - or - # Streamlit >= 0.65.2 - (not hasattr(s, '_main_dg') and s._uploaded_file_mgr == ctx.uploaded_file_mgr) - ): - this_session = s - - if this_session is None: - raise RuntimeError( - "Oh noes. Couldn't get your Streamlit Session object. " - 'Are you doing something fancy with threads?') - - # Got the session object! Now let's attach some state into it. - - if not hasattr(this_session, '_custom_session_state'): - this_session._custom_session_state = SessionState(**kwargs) - - return this_session._custom_session_state diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/nat/cmlm_transformer.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/nat/cmlm_transformer.py deleted file mode 100644 index c876e9453c101c00bd8e93e6e6f1fb48dc26f993..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/nat/cmlm_transformer.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -""" -This file implements: -Ghazvininejad, Marjan, et al. -"Constant-time machine translation with conditional masked language models." -arXiv preprint arXiv:1904.09324 (2019). -""" - -from fairseq.models import register_model, register_model_architecture -from fairseq.models.nat import NATransformerModel -from fairseq.utils import new_arange - - -def _skeptical_unmasking(output_scores, output_masks, p): - sorted_index = output_scores.sort(-1)[1] - boundary_len = ( - (output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p - ).long() - skeptical_mask = new_arange(output_masks) < boundary_len - return skeptical_mask.scatter(1, sorted_index, skeptical_mask) - - -@register_model("cmlm_transformer") -class CMLMNATransformerModel(NATransformerModel): - @staticmethod - def add_args(parser): - NATransformerModel.add_args(parser) - - def forward( - self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs - ): - assert not self.decoder.src_embedding_copy, "do not support embedding copy." - - # encoding - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - # length prediction - length_out = self.decoder.forward_length( - normalize=False, encoder_out=encoder_out - ) - length_tgt = self.decoder.forward_length_prediction( - length_out, encoder_out, tgt_tokens - ) - - # decoding - word_ins_out = self.decoder( - normalize=False, - prev_output_tokens=prev_output_tokens, - encoder_out=encoder_out, - ) - word_ins_mask = prev_output_tokens.eq(self.unk) - - return { - "word_ins": { - "out": word_ins_out, - "tgt": tgt_tokens, - "mask": word_ins_mask, - "ls": self.args.label_smoothing, - "nll_loss": True, - }, - "length": { - "out": length_out, - "tgt": length_tgt, - "factor": self.decoder.length_loss_factor, - }, - } - - def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): - - step = decoder_out.step - max_step = decoder_out.max_step - - output_tokens = decoder_out.output_tokens - output_scores = decoder_out.output_scores - history = decoder_out.history - - # execute the decoder - output_masks = output_tokens.eq(self.unk) - _scores, _tokens = self.decoder( - normalize=True, - prev_output_tokens=output_tokens, - encoder_out=encoder_out, - ).max(-1) - output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) - output_scores.masked_scatter_(output_masks, _scores[output_masks]) - - if history is not None: - history.append(output_tokens.clone()) - - # skeptical decoding (depend on the maximum decoding steps.) 
- if (step + 1) < max_step: - skeptical_mask = _skeptical_unmasking( - output_scores, output_tokens.ne(self.pad), 1 - (step + 1) / max_step - ) - - output_tokens.masked_fill_(skeptical_mask, self.unk) - output_scores.masked_fill_(skeptical_mask, 0.0) - - if history is not None: - history.append(output_tokens.clone()) - - return decoder_out._replace( - output_tokens=output_tokens, - output_scores=output_scores, - attn=None, - history=history, - ) - - -@register_model_architecture("cmlm_transformer", "cmlm_transformer") -def cmlm_base_architecture(args): - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.dropout = getattr(args, "dropout", 0.1) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", True) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.apply_bert_init = getattr(args, "apply_bert_init", False) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - # --- special arguments --- - args.sg_length_pred = getattr(args, "sg_length_pred", False) - args.pred_length_offset = getattr(args, "pred_length_offset", False) - args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) - args.ngram_predictor = getattr(args, "ngram_predictor", 1) - args.src_embedding_copy = getattr(args, "src_embedding_copy", False) - - -@register_model_architecture("cmlm_transformer", "cmlm_transformer_wmt_en_de") -def cmlm_wmt_en_de(args): - cmlm_base_architecture(args) diff --git a/spaces/stomexserde/gpt4-ui/Examples/Joshilay Movie Download 720p UPD.md b/spaces/stomexserde/gpt4-ui/Examples/Joshilay Movie Download 720p UPD.md deleted file mode 100644 index f8d281840a0013d7d3684905927c6cb60ac807e6..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Joshilay Movie Download 720p UPD.md +++ /dev/null @@ -1,19 +0,0 @@ -
              -Here is a possible title and article with html formatting for the keyword "Joshilay movie download 720p": - -

-How to Download Joshilay Movie in 720p Quality
-
-Joshilay is a 1989 Hindi-language action drama film directed by Sibte Hassan Rizvi and starring Sunny Deol, Anil Kapoor, Sridevi and Meenakshi Sheshadri. The film revolves around two men who seek revenge against a dacoit and his partner who wronged them in their childhood. The film has a music score by R. D. Burman and features the last song by the legendary duo of Kishore Kumar and R. D. Burman.
-
-If you are a fan of this classic film and want to watch it in high quality, you might be wondering how to download Joshilay movie in 720p resolution. Well, there are some ways to do that, but you need to be careful about the sources you use and the legal implications of downloading copyrighted content. Here are some tips to help you out:
-
-Joshilay movie download 720p
-Download File --->>> https://urlgoal.com/2uI6tj
-
-* Check if the movie is available on any legal streaming platforms, such as Netflix, Amazon Prime Video, Hotstar or YouTube. You can use a VPN service to access geo-restricted content if needed. Some of these platforms allow you to download movies for offline viewing, but you might need a subscription or a rental fee to do so.
-* If the movie is not available on any legal streaming platforms, you can try searching for torrent files or magnet links that contain the movie in 720p quality. You can use a torrent client such as BitTorrent or uTorrent to download the movie file. However, be aware that downloading torrents can be risky, as they might contain viruses, malware or spyware that can harm your device or compromise your privacy. You might also face legal action from the copyright holders or your internet service provider if you download pirated content.
-* Another option is to use a third-party website that offers direct download links for movies in various resolutions and formats. You can use a search engine such as Google or Bing to find such websites, but be careful about the reliability and safety of these websites. Some of them might have broken links, low-quality files, pop-up ads or malicious software that can infect your device or steal your personal information. You might also violate the law by downloading copyrighted content from these websites.
-
-As you can see, downloading Joshilay movie in 720p quality is not an easy task, and it involves some risks and challenges. Therefore, we recommend that you watch the movie legally on a streaming platform or buy a DVD or Blu-ray copy of the movie if available. This way, you can enjoy the movie in high quality without any hassle or worry.
-
-Here is a possible continuation of the article:
-
-Joshilay is a movie that has a lot of fans and admirers, especially among the lovers of action and drama genres. The movie showcases the performances of some of the finest actors of Bollywood, such as Sunny Deol, Anil Kapoor, Sridevi and Meenakshi Sheshadri. The movie also has some memorable songs and dialogues that have become iconic over the years. The movie has a cult following and is considered to be one of the best movies of the late 1980s.
-
-If you want to watch Joshilay movie in 720p quality, you have to be careful about the sources you use and the legal implications of downloading copyrighted content. We hope that this article has given you some useful tips and information on how to download Joshilay movie in 720p quality. However, we strongly advise that you watch the movie legally on a streaming platform or buy a DVD or Blu-ray copy of the movie if available. This way, you can support the makers of the movie and enjoy the movie in high quality without any hassle or worry.
-
-7196e7f11a
-
              \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/metagpt/roles/architect.py b/spaces/sub314xxl/MetaGPT/metagpt/roles/architect.py deleted file mode 100644 index 00b6cb2eb3c59345d4c4e2de626deeb4587374a7..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/roles/architect.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/11 14:43 -@Author : alexanderwu -@File : architect.py -""" - -from metagpt.actions import WriteDesign, WritePRD -from metagpt.roles import Role - - -class Architect(Role): - """Architect: Listen to PRD, responsible for designing API, designing code files""" - def __init__(self, name="Bob", profile="Architect", goal="Design a concise, usable, complete python system", - constraints="Try to specify good open source tools as much as possible"): - super().__init__(name, profile, goal, constraints) - self._init_actions([WriteDesign]) - self._watch({WritePRD}) diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/sd_hijack_optimizations.py b/spaces/supertori/files/stable-diffusion-webui/modules/sd_hijack_optimizations.py deleted file mode 100644 index f843634ac9f56c50db3a4c9a85e49c0216b946a9..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/sd_hijack_optimizations.py +++ /dev/null @@ -1,514 +0,0 @@ -import math -import sys -import traceback -import psutil - -import torch -from torch import einsum - -from ldm.util import default -from einops import rearrange - -from modules import shared, errors, devices -from modules.hypernetworks import hypernetwork - -from .sub_quadratic_attention import efficient_dot_product_attention - - -if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers: - try: - import xformers.ops - shared.xformers_available = True - except Exception: - print("Cannot import xformers", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - -def get_available_vram(): - if shared.device.type == 'cuda': - stats = torch.cuda.memory_stats(shared.device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] - mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) - mem_free_torch = mem_reserved - mem_active - mem_free_total = mem_free_cuda + mem_free_torch - return mem_free_total - else: - return psutil.virtual_memory().available - - -# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion -def split_cross_attention_forward_v1(self, x, context=None, mask=None): - h = self.heads - - q_in = self.to_q(x) - context = default(context, x) - - context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) - k_in = self.to_k(context_k) - v_in = self.to_v(context_v) - del context, context_k, context_v, x - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) - del q_in, k_in, v_in - - dtype = q.dtype - if shared.opts.upcast_attn: - q, k, v = q.float(), k.float(), v.float() - - with devices.without_autocast(disable=not shared.opts.upcast_attn): - r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - for i in range(0, q.shape[0], 2): - end = i + 2 - s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end]) - s1 *= self.scale - - s2 = s1.softmax(dim=-1) - del s1 - - r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end]) - del s2 - del q, k, v - - r1 = r1.to(dtype) - - r2 = rearrange(r1, '(b h) n d -> 
b n (h d)', h=h) - del r1 - - return self.to_out(r2) - - -# taken from https://github.com/Doggettx/stable-diffusion and modified -def split_cross_attention_forward(self, x, context=None, mask=None): - h = self.heads - - q_in = self.to_q(x) - context = default(context, x) - - context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) - k_in = self.to_k(context_k) - v_in = self.to_v(context_v) - - dtype = q_in.dtype - if shared.opts.upcast_attn: - q_in, k_in, v_in = q_in.float(), k_in.float(), v_in if v_in.device.type == 'mps' else v_in.float() - - with devices.without_autocast(disable=not shared.opts.upcast_attn): - k_in = k_in * self.scale - - del context, x - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) - del q_in, k_in, v_in - - r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - - mem_free_total = get_available_vram() - - gb = 1024 ** 3 - tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() - modifier = 3 if q.element_size() == 2 else 2.5 - mem_required = tensor_size * modifier - steps = 1 - - if mem_required > mem_free_total: - steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2))) - # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB " - # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}") - - if steps > 64: - max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 - raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). ' - f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free') - - slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] - for i in range(0, q.shape[1], slice_size): - end = i + slice_size - s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) - - s2 = s1.softmax(dim=-1, dtype=q.dtype) - del s1 - - r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) - del s2 - - del q, k, v - - r1 = r1.to(dtype) - - r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) - del r1 - - return self.to_out(r2) - - -# -- Taken from https://github.com/invoke-ai/InvokeAI and modified -- -mem_total_gb = psutil.virtual_memory().total // (1 << 30) - -def einsum_op_compvis(q, k, v): - s = einsum('b i d, b j d -> b i j', q, k) - s = s.softmax(dim=-1, dtype=s.dtype) - return einsum('b i j, b j d -> b i d', s, v) - -def einsum_op_slice_0(q, k, v, slice_size): - r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - for i in range(0, q.shape[0], slice_size): - end = i + slice_size - r[i:end] = einsum_op_compvis(q[i:end], k[i:end], v[i:end]) - return r - -def einsum_op_slice_1(q, k, v, slice_size): - r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - for i in range(0, q.shape[1], slice_size): - end = i + slice_size - r[:, i:end] = einsum_op_compvis(q[:, i:end], k, v) - return r - -def einsum_op_mps_v1(q, k, v): - if q.shape[0] * q.shape[1] <= 2**16: # (512x512) max q.shape[1]: 4096 - return einsum_op_compvis(q, k, v) - else: - slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1])) - if slice_size % 4096 == 0: - slice_size -= 1 - return einsum_op_slice_1(q, k, v, slice_size) - -def einsum_op_mps_v2(q, k, v): - if mem_total_gb > 8 and q.shape[0] * q.shape[1] <= 2**16: - return einsum_op_compvis(q, k, v) - else: - return einsum_op_slice_0(q, k, v, 1) - -def einsum_op_tensor_mem(q, k, v, max_tensor_mb): - size_mb 
= q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() // (1 << 20) - if size_mb <= max_tensor_mb: - return einsum_op_compvis(q, k, v) - div = 1 << int((size_mb - 1) / max_tensor_mb).bit_length() - if div <= q.shape[0]: - return einsum_op_slice_0(q, k, v, q.shape[0] // div) - return einsum_op_slice_1(q, k, v, max(q.shape[1] // div, 1)) - -def einsum_op_cuda(q, k, v): - stats = torch.cuda.memory_stats(q.device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] - mem_free_cuda, _ = torch.cuda.mem_get_info(q.device) - mem_free_torch = mem_reserved - mem_active - mem_free_total = mem_free_cuda + mem_free_torch - # Divide factor of safety as there's copying and fragmentation - return einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20)) - -def einsum_op(q, k, v): - if q.device.type == 'cuda': - return einsum_op_cuda(q, k, v) - - if q.device.type == 'mps': - if mem_total_gb >= 32 and q.shape[0] % 32 != 0 and q.shape[0] * q.shape[1] < 2**18: - return einsum_op_mps_v1(q, k, v) - return einsum_op_mps_v2(q, k, v) - - # Smaller slices are faster due to L2/L3/SLC caches. - # Tested on i7 with 8MB L3 cache. - return einsum_op_tensor_mem(q, k, v, 32) - -def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - - context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) - k = self.to_k(context_k) - v = self.to_v(context_v) - del context, context_k, context_v, x - - dtype = q.dtype - if shared.opts.upcast_attn: - q, k, v = q.float(), k.float(), v if v.device.type == 'mps' else v.float() - - with devices.without_autocast(disable=not shared.opts.upcast_attn): - k = k * self.scale - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - r = einsum_op(q, k, v) - r = r.to(dtype) - return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h)) - -# -- End of code from https://github.com/invoke-ai/InvokeAI -- - - -# Based on Birch-san's modified implementation of sub-quadratic attention from https://github.com/Birch-san/diffusers/pull/1 -# The sub_quad_attention_forward function is under the MIT License listed under Memory Efficient Attention in the Licenses section of the web UI interface -def sub_quad_attention_forward(self, x, context=None, mask=None): - assert mask is None, "attention-mask not currently implemented for SubQuadraticCrossAttnProcessor." 
- - h = self.heads - - q = self.to_q(x) - context = default(context, x) - - context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) - k = self.to_k(context_k) - v = self.to_v(context_v) - del context, context_k, context_v, x - - q = q.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) - k = k.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) - v = v.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) - - dtype = q.dtype - if shared.opts.upcast_attn: - q, k = q.float(), k.float() - - x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) - - x = x.to(dtype) - - x = x.unflatten(0, (-1, h)).transpose(1,2).flatten(start_dim=2) - - out_proj, dropout = self.to_out - x = out_proj(x) - x = dropout(x) - - return x - -def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_size_min=None, chunk_threshold=None, use_checkpoint=True): - bytes_per_token = torch.finfo(q.dtype).bits//8 - batch_x_heads, q_tokens, _ = q.shape - _, k_tokens, _ = k.shape - qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens - - if chunk_threshold is None: - chunk_threshold_bytes = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7) - elif chunk_threshold == 0: - chunk_threshold_bytes = None - else: - chunk_threshold_bytes = int(0.01 * chunk_threshold * get_available_vram()) - - if kv_chunk_size_min is None and chunk_threshold_bytes is not None: - kv_chunk_size_min = chunk_threshold_bytes // (batch_x_heads * bytes_per_token * (k.shape[2] + v.shape[2])) - elif kv_chunk_size_min == 0: - kv_chunk_size_min = None - - if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes: - # the big matmul fits into our memory limit; do everything in 1 chunk, - # i.e. 
send it down the unchunked fast-path - query_chunk_size = q_tokens - kv_chunk_size = k_tokens - - with devices.without_autocast(disable=q.dtype == v.dtype): - return efficient_dot_product_attention( - q, - k, - v, - query_chunk_size=q_chunk_size, - kv_chunk_size=kv_chunk_size, - kv_chunk_size_min = kv_chunk_size_min, - use_checkpoint=use_checkpoint, - ) - - -def get_xformers_flash_attention_op(q, k, v): - if not shared.cmd_opts.xformers_flash_attention: - return None - - try: - flash_attention_op = xformers.ops.MemoryEfficientAttentionFlashAttentionOp - fw, bw = flash_attention_op - if fw.supports(xformers.ops.fmha.Inputs(query=q, key=k, value=v, attn_bias=None)): - return flash_attention_op - except Exception as e: - errors.display_once(e, "enabling flash attention") - - return None - - -def xformers_attention_forward(self, x, context=None, mask=None): - h = self.heads - q_in = self.to_q(x) - context = default(context, x) - - context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) - k_in = self.to_k(context_k) - v_in = self.to_v(context_v) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in)) - del q_in, k_in, v_in - - dtype = q.dtype - if shared.opts.upcast_attn: - q, k = q.float(), k.float() - - out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v)) - - out = out.to(dtype) - - out = rearrange(out, 'b n h d -> b n (h d)', h=h) - return self.to_out(out) - -# Based on Diffusers usage of scaled dot product attention from https://github.com/huggingface/diffusers/blob/c7da8fd23359a22d0df2741688b5b4f33c26df21/src/diffusers/models/cross_attention.py -# The scaled_dot_product_attention_forward function contains parts of code under Apache-2.0 license listed under Scaled Dot Product Attention in the Licenses section of the web UI interface -def scaled_dot_product_attention_forward(self, x, context=None, mask=None): - batch_size, sequence_length, inner_dim = x.shape - - if mask is not None: - mask = self.prepare_attention_mask(mask, sequence_length, batch_size) - mask = mask.view(batch_size, self.heads, -1, mask.shape[-1]) - - h = self.heads - q_in = self.to_q(x) - context = default(context, x) - - context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) - k_in = self.to_k(context_k) - v_in = self.to_v(context_v) - - head_dim = inner_dim // h - q = q_in.view(batch_size, -1, h, head_dim).transpose(1, 2) - k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2) - v = v_in.view(batch_size, -1, h, head_dim).transpose(1, 2) - - del q_in, k_in, v_in - - dtype = q.dtype - if shared.opts.upcast_attn: - q, k = q.float(), k.float() - - # the output of sdp = (batch, num_heads, seq_len, head_dim) - hidden_states = torch.nn.functional.scaled_dot_product_attention( - q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False - ) - - hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, h * head_dim) - hidden_states = hidden_states.to(dtype) - - # linear proj - hidden_states = self.to_out[0](hidden_states) - # dropout - hidden_states = self.to_out[1](hidden_states) - return hidden_states - -def scaled_dot_product_no_mem_attention_forward(self, x, context=None, mask=None): - with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False): - return scaled_dot_product_attention_forward(self, x, context, mask) - -def cross_attention_attnblock_forward(self, x): - h_ = x - h_ = self.norm(h_) - q1 
= self.q(h_) - k1 = self.k(h_) - v = self.v(h_) - - # compute attention - b, c, h, w = q1.shape - - q2 = q1.reshape(b, c, h*w) - del q1 - - q = q2.permute(0, 2, 1) # b,hw,c - del q2 - - k = k1.reshape(b, c, h*w) # b,c,hw - del k1 - - h_ = torch.zeros_like(k, device=q.device) - - mem_free_total = get_available_vram() - - tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size() - mem_required = tensor_size * 2.5 - steps = 1 - - if mem_required > mem_free_total: - steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2))) - - slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] - for i in range(0, q.shape[1], slice_size): - end = i + slice_size - - w1 = torch.bmm(q[:, i:end], k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w2 = w1 * (int(c)**(-0.5)) - del w1 - w3 = torch.nn.functional.softmax(w2, dim=2, dtype=q.dtype) - del w2 - - # attend to values - v1 = v.reshape(b, c, h*w) - w4 = w3.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) - del w3 - - h_[:, :, i:end] = torch.bmm(v1, w4) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - del v1, w4 - - h2 = h_.reshape(b, c, h, w) - del h_ - - h3 = self.proj_out(h2) - del h2 - - h3 += x - - return h3 - -def xformers_attnblock_forward(self, x): - try: - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) - dtype = q.dtype - if shared.opts.upcast_attn: - q, k = q.float(), k.float() - q = q.contiguous() - k = k.contiguous() - v = v.contiguous() - out = xformers.ops.memory_efficient_attention(q, k, v, op=get_xformers_flash_attention_op(q, k, v)) - out = out.to(dtype) - out = rearrange(out, 'b (h w) c -> b c h w', h=h) - out = self.proj_out(out) - return x + out - except NotImplementedError: - return cross_attention_attnblock_forward(self, x) - -def sdp_attnblock_forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) - dtype = q.dtype - if shared.opts.upcast_attn: - q, k = q.float(), k.float() - q = q.contiguous() - k = k.contiguous() - v = v.contiguous() - out = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False) - out = out.to(dtype) - out = rearrange(out, 'b (h w) c -> b c h w', h=h) - out = self.proj_out(out) - return x + out - -def sdp_no_mem_attnblock_forward(self, x): - with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False): - return sdp_attnblock_forward(self, x) - -def sub_quad_attnblock_forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) - q = q.contiguous() - k = k.contiguous() - v = v.contiguous() - out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) - out = rearrange(out, 'b (h w) c -> b c h w', h=h) - out = self.proj_out(out) - return x + out diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Batman V Superman Dawn Of Justice (English) 720p In Hindi Dubbed Movie [CRACKED].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Batman V Superman Dawn Of Justice (English) 720p In 
Hindi Dubbed Movie [CRACKED].md deleted file mode 100644 index 83e727e5d805eb4f14971be8c6fb2fac938580a7..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Batman V Superman Dawn Of Justice (English) 720p In Hindi Dubbed Movie [CRACKED].md +++ /dev/null @@ -1,10 +0,0 @@ - -

The world changed when mankind, the citizens of Metropolis, and billionaire Bruce Wayne witnessed the awesome power of Superman. Now, two years later, the love affair has seemingly ended as the Man of Steel becomes entangled in the complexities of international politics, with lives lost amid terrorists and rebels.

              -

The general public is concerned about having Superman on their planet and letting the Dark Knight, Batman, prowl the streets of Gotham. While this is happening, a power-phobic Batman tries to attack Superman. Meanwhile, Superman tries to settle on a decision, and Lex Luthor, the criminal mastermind and millionaire, tries to use his own advantages to fight the Man of Steel.

              -

              Batman V Superman: Dawn of Justice (English) 720p in hindi dubbed movie


              Download 🗸🗸🗸 https://cinurl.com/2uEYSA



              -


              -

The common element between Superman and Batman is that they both long for a better life, and they both want to help others along the way. Batman, with his keen intellect, resourcefulness, cunning, and military training, is an excellent foil for Superman, who is the quintessential good hero. Like Superman, Batman fights for justice, truth, and the American way in an effort to protect his way of life, while taking on any nefarious criminal who dares to challenge him.

              -

              -

The most interesting aspect of BvS is how the seemingly good and evil characters engage with each other while realizing that they are both doing what is right in their own way. Both Batman and Superman recognize their common enemy, Lex Luthor, yet each has his own reasons for seeking the villain's destruction. Superman always had the conscience, while Batman became an expert in survival and took matters into his own hands to bring Lex down.

              -
              -
              \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Chandni Chowk To China Movie In Hindi Download.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Chandni Chowk To China Movie In Hindi Download.md deleted file mode 100644 index 359afb9f60124751aa266379b56bb3524e1a846c..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Chandni Chowk To China Movie In Hindi Download.md +++ /dev/null @@ -1,76 +0,0 @@ -

              Chandni Chowk To China Movie In Hindi Download


              Download Zip > https://cinurl.com/2uEY7v



              -
              -Cast - - Phoola - Liu Shengh, Palat Singh - - Ramji - Angry Vaidya - - Anees - Pritam - - Akram - Arvind - - Hina - Prerna - - Kalpana - Mushtari (credited as Kali Bai) - - Anupam - Darpan Saini - - Gurpal - Pankaj - - Sucheta - Kunwar sahib's niece - - Raju Mukherjee - Akbar's father - - Mohua - Akbar's mother - - Amina - Akbar's sister - - Mukesh - Suleiman - -Soundtrack - -The songs were composed by Gulzar and sung by Kishore Kumar, Kavita Krishnamurthy, Asha Bhosle, and Anuradha Patel. - -Box office - -The film ran for a long time in multiplexes and became a commercial success. - -References - -External links - - - -Category:1979 films - -Category:1970s Hindi-language films - -Category:Indian films - -Category:Films directed by Ramesh Sippy - -Category:Films scored by Ravindra Jain - -Category:Indian action comedy films - -Category:1970s action comedy films - -Category:Hindi remakes of Marathi filmsTehcnology - -Share this page - -You Are Visitor - -Capsule - -£38.99 - -For a small medium sized head the K90 is a well built dome and the 30mm version is actually a rather good looking dome. I like the design of the tubes they don’t look like tapered but rather like a nice fat tubular shape. - -The Zeiss K90 has a flip up lens cap which can be removed from the front of the unit, what a delight this is it takes all of 2 seconds to remove and replace and this is a good thing as there is one of the better tubes that we have tested which takes over 5 seconds to remove and replace. - -For a small medium sized head the K90 is a well built dome and the 30mm version 4fefd39f24
              -
              -
              -

              diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/4K Video Downloader 4.9.2 FOR MAC ? 100 Working.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/4K Video Downloader 4.9.2 FOR MAC ? 100 Working.md deleted file mode 100644 index 21a9d5416b3d7b8d2d5247363bd937283e696554..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/4K Video Downloader 4.9.2 FOR MAC ? 100 Working.md +++ /dev/null @@ -1,6 +0,0 @@ -

              4K Video Downloader 4.9.2 FOR MAC – 100 Working


              Download ————— https://urluss.com/2uCEzN



              - -4K Video Downloader 4.13.5.3950 Crack Lifetime Serial 24 Mar 2018 Downoad, ... Kastor All Video Downloader works fine with 32-bit versions of Windows XP/Vista/7/8. ... you an easy, efficient and legitimate method to search and download 100 million songs. ... Wondershare AllMyTube 4.9.2 for Windows - Download. 1fdad05405
              -
              -
              -

              diff --git a/spaces/suvradip2000/space1/app/templates/expr_recognition.html b/spaces/suvradip2000/space1/app/templates/expr_recognition.html deleted file mode 100644 index e89ac46299f9efb9a4d781291362cd37c5c2def0..0000000000000000000000000000000000000000 --- a/spaces/suvradip2000/space1/app/templates/expr_recognition.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - Index - - -
              -

              -
              Expression Recognition
              -

              -
              -
              -
              -
                - -
                -
                - Upload Image:

                - -


                - -
                - -

                -
                - -
                -
              -
              -
              - - diff --git a/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/data/visualize/__init__.py b/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/data/visualize/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/sweetcocoa/pop2piano/app.py b/spaces/sweetcocoa/pop2piano/app.py deleted file mode 100644 index 75c9efc01a6bf5e52762f876871de9001ebb8ae2..0000000000000000000000000000000000000000 --- a/spaces/sweetcocoa/pop2piano/app.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -import binascii -import warnings - -import gradio as gr -import librosa -import numpy as np -import torch -import pretty_midi -import pytube as pt - -from pytube.exceptions import VideoUnavailable -from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor - -from utils import mp3_write, normalize - -yt_video_dir = "./yt_dir" -outputs_dir = "./midi_wav_outputs" -os.makedirs(outputs_dir, exist_ok=True) -os.makedirs(yt_video_dir, exist_ok=True) - -device = "cuda" if torch.cuda.is_available() else "cpu" -model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano").to(device) -processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") -composers = model.generation_config.composer_to_feature_token.keys() - - -def get_audio_from_yt_video(yt_link: str): - try: - yt = pt.YouTube(yt_link) - t = yt.streams.filter(only_audio=True) - filename = os.path.join(yt_video_dir, binascii.hexlify(os.urandom(8)).decode() + ".mp4") - t[0].download(filename=filename) - except VideoUnavailable as e: - warnings.warn(f"Video Not Found at {yt_link} ({e})") - filename = None - - return filename, filename - - -def inference(file_uploaded, composer): - # to save the native sampling rate of the file, sr=None is used, but this can cause some silent errors where the - # generated output will not be upto the desired quality. If that happens please consider switching sr to 44100 Hz. - pop_y, sr = librosa.load(file_uploaded, sr=None) - - inputs = processor(audio=pop_y, sampling_rate=sr, return_tensors="pt").to(device) - model_output = model.generate(input_features=inputs["input_features"], composer=composer) - tokenizer_output = processor.batch_decode( - token_ids=model_output.to("cpu"), feature_extractor_output=inputs.to("cpu") - )["pretty_midi_objects"] - - return prepare_output_file(tokenizer_output, sr, pop_y) - - -def prepare_output_file(tokenizer_output: pretty_midi.PrettyMIDI, sr: int, pop_y: np.ndarray): - # Add some random values so that no two file names are same - output_file_name = "p2p_" + binascii.hexlify(os.urandom(8)).decode() - midi_output = os.path.join(outputs_dir, output_file_name + ".mid") - - # write the .mid and its wav files - tokenizer_output[0].write(midi_output) - midi_y: np.ndarray = tokenizer_output[0].fluidsynth(sr) - midi_y_path: str = midi_output.replace(".mid", ".mp3") - mp3_write(midi_y_path, sr, normalize(midi_y), normalized=True) - - # stack stereo audio - if len(pop_y) > len(midi_y): - midi_y = np.pad(midi_y, (0, len(pop_y) - len(midi_y))) - elif len(pop_y) < len(midi_y): - pop_y = np.pad(pop_y, (0, -len(pop_y) + len(midi_y))) - stereo = np.stack((midi_y, pop_y * 0.5)) - - # write stereo audio - stereo_path = midi_output.replace(".mid", ".mix.mp3") - mp3_write(stereo_path, sr, normalize(stereo.T), normalized=True) - - return midi_y_path, midi_y_path, midi_output, stereo_path, stereo_path - - -block = gr.Blocks() - -with block: - gr.HTML( - """ -
              -
              -

              - Pop2piano -

              -
              -

              - A demo for Pop2Piano:Pop Audio-based Piano Cover Generation.
              - Please select the composer(Arranger) and upload the pop audio or enter the YouTube link and then click Generate. -

              -
              - """ - ) - with gr.Group(): - with gr.Column(): - with gr.Blocks() as audio_select: - with gr.Tab("Upload Audio"): - file_uploaded = gr.Audio(label="Upload an audio", type="filepath") - with gr.Tab("YouTube url"): - with gr.Row(): - yt_link = gr.Textbox( - label="Enter YouTube Link of the Video", autofocus=True, lines=3 - ) - yt_btn = gr.Button("Download Audio from YouTube Link", size="lg") - yt_audio_path = gr.Audio( - label="Audio Extracted from the YouTube Video", interactive=False - ) - yt_btn.click( - get_audio_from_yt_video, - inputs=[yt_link], - outputs=[yt_audio_path, file_uploaded], - ) - with gr.Column(): - composer = gr.Dropdown(label="Arranger", choices=composers, value="composer1") - generate_btn = gr.Button("Generate") - - with gr.Group(): - gr.HTML( - """ -

              Listen to the generated MIDI.

              - """ - ) - with gr.Row().style(mobile_collapse=False, equal_height=True): - stereo_mix1 = gr.Audio(label="Listen to the Stereo Mix") - wav_output1 = gr.Audio(label="Listen to the Generated MIDI") - - with gr.Row(): - stereo_mix2 = gr.File(label="Download the Stereo Mix (.mp3") - wav_output2 = gr.File(label="Download the Generated MIDI (.mp3)") - midi_output = gr.File(label="Download the Generated MIDI (.mid)") - generate_btn.click( - inference, - inputs=[file_uploaded, composer], - outputs=[wav_output1, wav_output2, midi_output, stereo_mix1, stereo_mix2], - ) - - with gr.Group(): - gr.Examples( - [ - ["./examples/custom_song.mp3", "composer1"], - ], - fn=inference, - inputs=[file_uploaded, composer], - outputs=[wav_output1, wav_output2, midi_output, stereo_mix1, stereo_mix2], - cache_examples=True, - ) - - gr.HTML( - """ - - """ - ) - -block.launch(debug=False) diff --git a/spaces/szukevin/VISOR-GPT/train/scripts/topn_words_dep.py b/spaces/szukevin/VISOR-GPT/train/scripts/topn_words_dep.py deleted file mode 100644 index 8fa8771de60c3f461f785d0ffd38e046381d6f05..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/scripts/topn_words_dep.py +++ /dev/null @@ -1,129 +0,0 @@ -import sys -import os -import torch -import argparse -import numpy as np - -tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -sys.path.append(tencentpretrain_dir) - -from tencentpretrain.embeddings import * -from tencentpretrain.encoders import * -from tencentpretrain.utils.constants import * -from tencentpretrain.utils import * -from tencentpretrain.utils.config import load_hyperparam -from tencentpretrain.utils.vocab import Vocab -from tencentpretrain.opts import model_opts, tokenizer_opts - - -class SequenceEncoder(torch.nn.Module): - def __init__(self, args): - super(SequenceEncoder, self).__init__() - self.embedding = str2embedding[args.embedding](args, len(args.tokenizer.vocab)) - self.encoder = str2encoder[args.encoder](args) - - def forward(self, src, seg): - emb = self.embedding(src, seg) - output = self.encoder(emb, seg) - - return output - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - - model_opts(parser) - - parser.add_argument("--load_model_path", default=None, type=str, - help="Path of the input model.") - parser.add_argument("--cand_vocab_path", default=None, type=str, - help="Path of the candidate vocabulary file.") - parser.add_argument("--test_path", type=str, required=True, - help="Path of the target word an its context.") - parser.add_argument("--config_path", default="models/bert/base_config.json", type=str, - help="Path of the config file.") - - tokenizer_opts(parser) - - parser.add_argument("--batch_size", type=int, default=64, - help="Batch size.") - parser.add_argument("--seq_length", type=int, default=128, - help="Sequence length.") - - parser.add_argument("--topn", type=int, default=15) - - args = parser.parse_args() - args = load_hyperparam(args) - - args.spm_model_path = None - - vocab = Vocab() - vocab.load(args.vocab_path) - - cand_vocab = Vocab() - cand_vocab.load(args.cand_vocab_path) - - args.tokenizer = str2tokenizer[args.tokenizer](args) - - model = SequenceEncoder(args) - - pretrained_model = torch.load(args.load_model_path) - model.load_state_dict(pretrained_model, strict=False) - - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - if torch.cuda.device_count() > 1: - print("{} GPUs are available. 
Let's use them.".format(torch.cuda.device_count())) - model = torch.nn.DataParallel(model) - model = model.to(device) - model.eval() - - PAD_ID = args.tokenizer.vocab.get(PAD_TOKEN) - with open(args.test_path, mode="r", encoding="utf-8") as f: - for line in f: - line = line.strip().split("\t") - if len(line) != 2: - continue - target_word, context = line[0], line[1] - print("Original sentence: " + context) - print("Target word: " + target_word) - src = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(context)) - seg = [1] * len(src) - if len(src) > args.seq_length: - src = src[:args.seq_length] - seg = seg[:args.seq_length] - while len(src) < args.seq_length: - src.append(PAD_ID) - seg.append(PAD_ID) - - target_word_id = vocab.get(target_word) - if target_word_id in src: - position = src.index(target_word_id) - else: - print("The target word is not in the sentence.") - continue - - output = model(torch.LongTensor([src]).to(device), torch.LongTensor([seg]).to(device)) - output = output.cpu().data.numpy() - output = output.reshape([args.seq_length, -1]) - target_embedding = output[position, :] - target_embedding = target_embedding.reshape(1, -1).astype("float") - - cand_words_batch, cand_embeddings = [], [] - for i, word in enumerate(cand_vocab.i2w): - cand_words_batch.append(vocab.w2i.get(word)) - if len(cand_words_batch) == args.batch_size or i == (len(cand_vocab.i2w)-1): - src_batch = torch.LongTensor([src] * len(cand_words_batch)) - seg_batch = [seg] * len(cand_words_batch) - src_batch[:, position] = torch.LongTensor(cand_words_batch) - output = model(torch.LongTensor(src_batch).to(device), torch.LongTensor(seg_batch).to(device)) - output = output.cpu().data.numpy() - output = np.reshape(output, (len(output), args.seq_length, -1)) - cand_embeddings.extend(output[:, position, :].tolist()) - cand_words_batch = [] - - sims = torch.nn.functional.cosine_similarity(torch.FloatTensor(target_embedding), \ - torch.FloatTensor(cand_embeddings)) - - sorted_ids = torch.argsort(sims, descending=True) - for j in sorted_ids[1: args.topn + 1]: - print(cand_vocab.i2w[j].strip() + "\t" + str(sims[j].item())) diff --git a/spaces/tang155/bingo/next.config.js b/spaces/tang155/bingo/next.config.js deleted file mode 100644 index 0e6ccd7fbc91d0459eaaff3e968ce0556789c605..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/next.config.js +++ /dev/null @@ -1,38 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - // output: 'export', - // assetPrefix: '.', - webpack: (config, { isServer }) => { - if (!isServer) { - config.resolve = { - ...config.resolve, - fallback: { - 'bufferutil': false, - 'utf-8-validate': false, - http: false, - https: false, - stream: false, - // fixes proxy-agent dependencies - net: false, - dns: false, - tls: false, - assert: false, - // fixes next-i18next dependencies - path: false, - fs: false, - // fixes mapbox dependencies - events: false, - // fixes sentry dependencies - process: false - } - }; - } - config.module.exprContextCritical = false; - - return config; - }, -} - -module.exports = (...args) => { - return nextConfig -} diff --git a/spaces/taquynhnga/CNNs-interpretation-visualization/README.md b/spaces/taquynhnga/CNNs-interpretation-visualization/README.md deleted file mode 100644 index ff595b9224fc89bd1dcc671cb00e4301f71d769f..0000000000000000000000000000000000000000 --- a/spaces/taquynhnga/CNNs-interpretation-visualization/README.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: CNNs Interpretation Visualization -emoji: 💡 
-colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.19.0 -app_file: Home.py -pinned: false ---- - -# Visualizing Interpretations of CNN models: ConvNeXt, ResNet and MobileNet - -To be change name: CNNs-interpretation-visualization - -This app was built with Streamlit. To run the app, `streamlit run Home.py` in the terminal. - -This repo lacks one more folder `data/preprocessed_image_net` which contains 50,000 preprocessed imagenet validation images saved in 5 pickle files. diff --git a/spaces/tarteel-ai/latest-demo/app.py b/spaces/tarteel-ai/latest-demo/app.py deleted file mode 100644 index bef5d0eadc1001efecd1bde84384a5dc61b30875..0000000000000000000000000000000000000000 --- a/spaces/tarteel-ai/latest-demo/app.py +++ /dev/null @@ -1,108 +0,0 @@ -from datetime import datetime, timedelta -import os - -import gradio as gr -import nemo.collections.asr as nemo_asr -import wandb - -MODEL_HISTORY_DAYS = 180 -WANDB_ENTITY = os.environ.get("WANDB_ENTITY", "tarteel") -WANDB_PROJECT_NAME = os.environ.get("WANDB_PROJECT_NAME", "nemo-experiments") - -wandb_api = wandb.Api(overrides={"entity": WANDB_ENTITY}) - -all_artifacts_versions = [ - version - for version in [ - collection.versions() - for collection in wandb_api.artifact_type( - type_name="model", project=WANDB_PROJECT_NAME - ).collections() - ] -] - -latest_artifacts = [ - artifact - for artifact_versions in all_artifacts_versions - for artifact in artifact_versions - if ( - datetime.fromisoformat(artifact.created_at) - > datetime.now() - timedelta(days=MODEL_HISTORY_DAYS) # last 180 days - and artifact.state != "DELETED" - ) -] -latest_artifacts.sort(key=lambda a: a.created_at, reverse=True) - -models = {artifact.name: None for artifact in latest_artifacts} - - -def lazy_load_models(models_names): - for model_name in models_names: - model = models[model_name] - if not model: - models[model_name] = nemo_asr.models.ASRModel.restore_from( - list(filter(lambda x: x.name == model_name, latest_artifacts))[0].file() - ) - models[model_name].eval() - - -def transcribe(audio_mic, audio_file, models_names): - lazy_load_models(models_names) - # transcribe audio_mic and audio_file separately - # because transcribe() fails is path is empty - transcription_mic = "\n".join( - [ - f"{model_name} => {models[model_name].transcribe([audio_mic])[0]}" - for model_name in models_names - ] - if audio_mic - else "" - ) - transcription_file = "\n".join( - [ - f"{model_name} => {models[model_name].transcribe([audio_file])[0]}" - for model_name in models_names - ] - if audio_file - else "" - ) - return transcription_mic, transcription_file - - -model_selection = list(models.keys()) - -demo = gr.Blocks() - -with demo: - gr.Markdown( - """ - # ﷽ - These are the latest* Tarteel models. - - Please note that the models are lazy loaded. - This means that the first time you use a model, - it might take some time to be downloaded and loaded for inference. - - *: last 180 days since the space was launched. - To update the list, restart the space. 
- """ - ) - with gr.Row(): - audio_mic = gr.Audio(source="microphone", type="filepath", label="Microphone") - audio_file = gr.Audio(source="upload", type="filepath", label="File") - - with gr.Row(): - output_mic = gr.TextArea(label="Microphone Transcription") - output_file = gr.TextArea(label="Audio Transcription") - - models_names = gr.CheckboxGroup(model_selection, label="Select Models to Use") - - b1 = gr.Button("Transcribe") - - b1.click( - transcribe, - inputs=[audio_mic, audio_file, models_names], - outputs=[output_mic, output_file], - ) - -demo.launch() diff --git a/spaces/terfces0erbo/CollegeProjectV2/Answerkeyenglishforbusinessstudiesthirdeditionianmackenzie20.md b/spaces/terfces0erbo/CollegeProjectV2/Answerkeyenglishforbusinessstudiesthirdeditionianmackenzie20.md deleted file mode 100644 index d0cc6465c3b8b94635578ab3e2d8286597a28e7e..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Answerkeyenglishforbusinessstudiesthirdeditionianmackenzie20.md +++ /dev/null @@ -1,6 +0,0 @@ -

              answerkeyenglishforbusinessstudiesthirdeditionianmackenzie20


              Downloadhttps://bytlly.com/2uGiHQ



              -
              -Answerkeyenglishforbusinessstudiesthirdeditionianmackenzie20 ✅ https://tinurll.com/1gixhx ✅ https://tinurll.com/1gixhx-2 d31cf15d6b usb serial adapter made ... 4d29de3e1b
              -
              -
              -

              diff --git a/spaces/thegenerativegeneration/FNeVR_demo/sync_batchnorm/replicate.py b/spaces/thegenerativegeneration/FNeVR_demo/sync_batchnorm/replicate.py deleted file mode 100644 index b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06..0000000000000000000000000000000000000000 --- a/spaces/thegenerativegeneration/FNeVR_demo/sync_batchnorm/replicate.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# File : replicate.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import functools - -from torch.nn.parallel.data_parallel import DataParallel - -__all__ = [ - 'CallbackContext', - 'execute_replication_callbacks', - 'DataParallelWithCallback', - 'patch_replication_callback' -] - - -class CallbackContext(object): - pass - - -def execute_replication_callbacks(modules): - """ - Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. - - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Note that, as all modules are isomorphism, we assign each sub-module with a context - (shared among multiple copies of this module on different devices). - Through this context, different copies can share some information. - - We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback - of any slave copies. - """ - master_copy = modules[0] - nr_modules = len(list(master_copy.modules())) - ctxs = [CallbackContext() for _ in range(nr_modules)] - - for i, module in enumerate(modules): - for j, m in enumerate(module.modules()): - if hasattr(m, '__data_parallel_replicate__'): - m.__data_parallel_replicate__(ctxs[j], i) - - -class DataParallelWithCallback(DataParallel): - """ - Data Parallel with a replication callback. - - An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by - original `replicate` function. - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - # sync_bn.__data_parallel_replicate__ will be invoked. - """ - - def replicate(self, module, device_ids): - modules = super(DataParallelWithCallback, self).replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - -def patch_replication_callback(data_parallel): - """ - Monkey-patch an existing `DataParallel` object. Add the replication callback. - Useful when you have customized `DataParallel` implementation. 
- - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) - > patch_replication_callback(sync_bn) - # this is equivalent to - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - """ - - assert isinstance(data_parallel, DataParallel) - - old_replicate = data_parallel.replicate - - @functools.wraps(old_replicate) - def new_replicate(module, device_ids): - modules = old_replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - data_parallel.replicate = new_replicate diff --git a/spaces/theintuitiveye/FantasyMix-v1/README.md b/spaces/theintuitiveye/FantasyMix-v1/README.md deleted file mode 100644 index 9867669f59b5c5b4718464ba3304e2cfeacda272..0000000000000000000000000000000000000000 --- a/spaces/theintuitiveye/FantasyMix-v1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: FantasyMix V1 -emoji: 📉 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/thesven/image-to-story/app.py b/spaces/thesven/image-to-story/app.py deleted file mode 100644 index 69e4a44d913ba2db7718d5e24df5dd97b5d35318..0000000000000000000000000000000000000000 --- a/spaces/thesven/image-to-story/app.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -import requests -import streamlit as st - -from dotenv import find_dotenv, load_dotenv -from transformers import pipeline - -from langchain import PromptTemplate, LLMChain -from langchain.llms import GooglePalm - -load_dotenv(find_dotenv()) - -llm = GooglePalm(temperature=0.9, google_api_key=os.getenv("GOOGLE_API_KEY")) - -# Iamge to Text -def image_to_text(url): - #load a transformer - image_to_text = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") - - text = image_to_text(url)[0]['generated_text'] - - print (text) - return text - -# llm -def generate_story(scenario): - template = """ - you are a very good story teller and a very rude person: - you can generate a short fairy tail based on a single narrative, the story should take 5 seconds to read. 
- - CONTEXT: {scenario} - STORY: - """ - - prompt = PromptTemplate(template=template, input_variables=["scenario"]) - story_llm = LLMChain(llm=llm, prompt=prompt, verbose=True) - story = story_llm.predict(scenario=scenario) - print(story) - return story - -# text to speech - -def text_to_speech(message): - API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits" - headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_TOKEN')}"} - payload = {"inputs": message} - - response = requests.post(API_URL, headers=headers, json=payload) - print(response.content) - with open('audio.mp3', 'wb') as audio_file: - audio_file.write(response.content) - -def main(): - st.set_page_config(page_title="Image to Story", page_icon="📚", layout="wide") - - st.title("Image to Story") - uploaded_file = st.file_uploader("Choose an image...", type="png") - - if uploaded_file is not None: - bytes_data = uploaded_file.getvalue() - with open(uploaded_file.name, "wb") as file: - file.write(bytes_data) - st.image(uploaded_file, caption='Uploaded Image.', use_column_width=True) - scenario = image_to_text(uploaded_file.name) - story = generate_story(scenario) - text_to_speech(story) - - with st.expander("scenerio"): - st.write(scenario) - with st.expander("story"): - st.write(story) - - st.audio("audio.mp3") - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/thinkersloop/finetuned-dl-cord-v2/README.md b/spaces/thinkersloop/finetuned-dl-cord-v2/README.md deleted file mode 100644 index 0fc05d8d3c2ff929a330f4ac84c8cc6c1223b450..0000000000000000000000000000000000000000 --- a/spaces/thinkersloop/finetuned-dl-cord-v2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Finetuned DL Cord -emoji: 💳 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.0.26 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Egg NS Mod APK 4.0.4 - The Best NS Emulator for Android.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Egg NS Mod APK 4.0.4 - The Best NS Emulator for Android.md deleted file mode 100644 index 18a774e70b7b44c83ad49bf942dc96f976dd7a15..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Egg NS Mod APK 4.0.4 - The Best NS Emulator for Android.md +++ /dev/null @@ -1,192 +0,0 @@ -
              -

              Egg NS Mod APK: How to Play Nintendo Switch Games on Your Android Device

              -

              Do you love Nintendo Switch games but don't have the console or the budget to buy one? Do you wish you could play your favorite Switch games on your Android device anytime and anywhere? If yes, then you are in luck because there is a way to do that with the help of Egg NS Mod APK.

              -

Egg NS Mod APK is a modified version of the original Egg NS Emulator, an Android app that allows you to play Nintendo Switch games on your smartphone. With this modded version, you can enjoy all the features of the emulator without any limitations or restrictions. You can play hundreds of Switch games at full FPS with touch and controller support and an easy interface, and you can save your game progress, change the language, access the VIP center, and more.

              -

              egg ns mod apk


              DOWNLOAD ⚙⚙⚙ https://bltlly.com/2uOszN



              -

              In this article, we will tell you everything you need to know about Egg NS Mod APK, including its features, how to download and install it, how to use it, and some frequently asked questions. By the end of this article, you will be able to play Nintendo Switch games on your Android device with ease and fun.

              -

              What is Egg NS Mod APK?

              -

Egg NS Mod APK is a modified version of the original Egg NS Emulator, which is an Android app that lets you play Nintendo Switch games on your smartphone. The original emulator was developed by Nxteam Studios, a team of developers who are passionate about Nintendo games and emulation. The emulator is compatible with hundreds of Switch games, even AAA titles, and supports both Bluetooth controllers and touch-screen controls.

              -

However, the original emulator has some drawbacks: it requires a high-end device, shows ads, charges a subscription fee for some features, has limited game compatibility, and suffers from occasional bugs and glitches. That's why some modders decided to create a modified version that removes these limitations and improves the user experience. That version is called Egg NS Mod APK, and it offers all the features of the original emulator for free, without any restrictions.

              -

              Features of Egg NS Mod APK

              -

              Egg NS Mod APK has many features that make it one of the best Nintendo Switch emulators for Android devices. Here are some of the main features of the modded version:

              -

              Multiple Games

              -

              Egg NS Mod APK supports a wide variety of Switch games from different genres and categories. You can play popular RPGs like The Legend of Zelda: Breath of the Wild, Pokemon Sword and Shield, Fire Emblem: Three Houses, etc. You can also play puzzle games like Tetris 99, Snipperclips, Captain Toad: Treasure Tracker, etc. You can also play platformers like Super Mario Odyssey, Donkey Kong Country: Tropical Freeze, Sonic Mania Plus, etc. And many more games like Mario Kart 8 Deluxe, Super Smash Bros. Ultimate, Animal Crossing: New Horizons, etc.

              -

              Full FPS

              -

              Egg NS Mod APK delivers smooth and stable performance for most Switch games. You can enjoy the games with full FPS (frames per second), which means that the games will run smoothly and without any lag or stutter. You can also adjust the FPS settings according to your device's capabilities and preferences.

              -

              egg ns emulator mod apk download
              -egg ns switch emulator mod apk
              -egg ns pro mod apk
              -egg ns emulator nxteam mod apk
              -egg ns emulator apk mod unlocked
              -egg ns emulator apk mod free
              -egg ns emulator android mod apk
              -egg ns emulator latest mod apk
              -egg ns emulator premium mod apk
              -egg ns emulator full mod apk
              -egg ns emulator cracked mod apk
              -egg ns emulator hack mod apk
              -egg ns emulator no ads mod apk
              -egg ns emulator plus mod apk
              -egg ns emulator vip mod apk
              -egg ns emulator beta mod apk
              -egg ns emulator update mod apk
              -egg ns emulator 4.0.4 mod apk
              -egg ns emulator 4.0.7 mod apk
              -egg ns emulator 3.2.1 mod apk
              -egg ns nintendo switch emulator mod apk
              -egg ns switch games emulator mod apk
              -egg ns switch pro controller emulator mod apk
              -egg ns switch online emulator mod apk
              -egg ns switch lite emulator mod apk
              -egg ns switch joy con emulator mod apk
              -egg ns switch android emulator mod apk
              -egg ns switch ios emulator mod apk
              -download game egg ns switch emulator mod apk
              -download aplikasi egg ns switch emulator mod apk
              -cara download egg ns switch emulator mod apk
              -how to download egg ns switch emulator mod apk
              -best settings for egg ns switch emulator mod apk
              -compatible games for egg ns switch emulator mod apk
              -how to play games on egg ns switch emulator mod apk
              -how to use gamepad on egg ns switch emulator mod apk
              -how to connect bluetooth controller on egg ns switch emulator mod apk
              -how to fix lag on egg ns switch emulator mod apk
              -how to increase fps on egg ns switch emulator mod apk
              -how to install games on egg ns switch emulator mod apk
              -how to update games on egg ns switch emulator mod apk
              -how to save games on egg ns switch emulator mod apk
              -how to load games on egg ns switch emulator mod apk
              -how to delete games on egg ns switch emulator mod apk
              -how to change language on egg ns switch emulator mod apk
              -how to change resolution on egg ns switch emulator mod apk
              -how to change graphics on egg ns switch emulator mod apk
              -how to change sound on egg ns switch emulator mod apk
              -how to change controls on egg ns switch emulator mod apk

              -

              Touch and Controller Support

              -

              Egg NS Mod APK allows you to control the games with either touch screen or Bluetooth controller. You can use the virtual buttons on the screen to play the games, or you can connect a compatible controller to your device via Bluetooth and enjoy a more comfortable and immersive gaming experience. You can also customize the button layout and sensitivity according to your liking.

              -

              Easy to Use Interface

              -

              Egg NS Mod APK has a simple and user-friendly interface that makes it easy to use and navigate. You can access all the features and settings from the main menu, such as loading games, configuring settings, accessing VIP center, etc. You can also switch between different themes and languages for the interface.

              -

              Save Game Progress

              -

              Egg NS Mod APK lets you save your game progress anytime and anywhere. You can use the save state feature to save your game at any point, and then load it later when you want to resume playing. You can also use the auto-save feature to automatically save your game every few minutes. This way, you won't lose your progress even if your device crashes or runs out of battery.

              -

              Change Language

              -

              Egg NS Mod APK supports multiple languages for the games and the interface. You can change the language of the games from the settings menu, and choose from English, Chinese, Japanese, Korean, Spanish, French, German, Italian, etc. You can also change the language of the interface from the main menu, and choose from English, Chinese, Japanese, Korean, etc.

              -

              VIP Center

              -

              Egg NS Mod APK gives you access to the VIP center, which is a special feature that offers exclusive benefits for the users. You can get VIP points by playing games, watching ads, inviting friends, etc. You can use these points to redeem various rewards, such as free games, coupons, gift cards, etc. You can also get VIP membership by paying a small fee, which will give you more VIP points and privileges.

              -

              Free to Use

              -

              Egg NS Mod APK is completely free to use and download. You don't need to pay any subscription fee or hidden charges to use the emulator or play the games. You can enjoy all the features of the emulator without any limitations or restrictions.

              -

              How to Download and Install Egg NS Mod APK?

              -

              If you want to download and install Egg NS Mod APK on your Android device, you need to follow these simple steps:

              -

              Requirements for Egg NS Mod APK

              -

              Before you download and install Egg NS Mod APK, you need to make sure that your device meets these requirements:

              -
                -
• Your device must run Android 5.0 or higher.
              • -
              • Your device must have at least 3 GB of RAM and 64 GB of storage space.
              • -
              • Your device must have a Snapdragon 855 or higher processor.
              • -
              • Your device must have a good internet connection.
              • -
              • Your device must allow installation of apps from unknown sources.
              • -
              -
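These figures are easiest to confirm in Settings > About phone, but if you happen to have a PC with adb installed and USB debugging enabled on the phone, the short Python sketch below reads the same information from the command line. It is only a rough helper under assumptions: the property names (ro.build.version.release, ro.board.platform) and the thresholds in the comments simply mirror the requirements above, chipset naming varies between vendors, and very old Android builds may lack some toybox options, so treat the output as a hint rather than a guarantee.

```python
# Rough pre-flight check before installing Egg NS Mod APK.
# Assumes: adb is on PATH, USB debugging is enabled, and one device is connected.
import subprocess

def adb_shell(*args) -> str:
    """Run a command on the connected phone via `adb shell` and return its output."""
    result = subprocess.run(["adb", "shell", *args],
                            capture_output=True, text=True, check=True)
    return result.stdout.strip()

android_version = adb_shell("getprop", "ro.build.version.release")   # article asks for 5.0 or higher
chipset = adb_shell("getprop", "ro.board.platform")                  # e.g. "msmnile" on Snapdragon 855
mem_total_kb = int(adb_shell("cat", "/proc/meminfo").splitlines()[0].split()[1])  # MemTotal, in kB
data_usage = adb_shell("df", "-h", "/data").splitlines()[-1]         # free space on the data partition

print(f"Android version : {android_version}")
print(f"Chipset         : {chipset}")
print(f"Total RAM       : {mem_total_kb / 1024 / 1024:.1f} GB (at least 3 GB recommended above)")
print(f"/data partition : {data_usage} (64 GB of storage recommended above)")
```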

              Steps to Download and Install Egg NS Mod APK

              -

              After you have checked that your device meets the requirements, you can follow these steps to download and install Egg NS Mod APK:

              -
                -
              1. Go to this link and download the Egg NS Mod APK file on your device.
              2. -
              3. Go to this link and download the Egg NS Controller file on your device.
              4. -
              5. Go to this link and download the Egg NS JoyCon file on your device.
              6. -
              7. Go to your device's file manager and locate the downloaded files.
              8. -
              9. Tap on each file and install them one by one on your device.
              10. -
              11. Wait for the installation process to complete.
              12. -
              13. Launch the Egg NS Mod APK app from your app drawer or home screen.
              14. -
              15. Enjoy playing Nintendo Switch games on your Android device.
              16. -
              -
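If you would rather push the files from a PC than tap through a file manager, the same installation can be scripted with adb. This is only a sketch under assumptions: adb is installed, USB debugging is enabled, each of the three downloads is an installable APK (as the steps above imply), and the file names below are placeholders for whatever names your downloads were saved under.

```python
# Sideload the three downloads with adb instead of a file manager.
# The file names are hypothetical; replace them with your actual downloads.
import subprocess

apk_files = [
    "egg_ns_mod.apk",         # the modded emulator itself
    "egg_ns_controller.apk",  # controller support package
    "egg_ns_joycon.apk",      # Joy-Con support package
]

for apk in apk_files:
    # "-r" reinstalls over an existing copy and keeps its data.
    subprocess.run(["adb", "install", "-r", apk], check=True)
    print(f"installed {apk}")
```

Once the last install finishes, the app appears in the app drawer just as it would after a manual installation.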

              How to Use Egg NS Mod APK?

              -

              After you have downloaded and installed Egg NS Mod APK on your device, you need to know how to use it properly. Here are some tips on how to use Egg NS Mod APK:

              -

              How to Load Games on Egg NS Mod APK?

              -

              To load games on Egg NS Mod APK, you need to have the game ROMs or files on your device or SD card. You can get these files from various sources online, but make sure that you only download the games that you own legally and do not support piracy. Once you have the game files, you can follow these steps to load them on Egg NS Mod APK:

              -
                -
              1. Launch the Egg NS Mod APK app on your device.
              2. -
              3. Tap on the "Load Game" button on the main menu.
              4. -
              5. Browse your device's storage and locate the game file that you want to play.
              6. -
              7. Tap on the game file and wait for it to load on the emulator.
              8. -
              9. Enjoy playing the game on your device.
              10. -
              -

              How to Configure Settings on Egg NS Mod APK?

              -

              To configure settings on Egg NS Mod APK, you can access the settings menu from the main menu or from the game screen. You can adjust various settings according to your preferences and needs, such as:

              -
                -
              • FPS: You can change the FPS settings from 30 to 60 or vice versa, depending on your device's performance and the game's requirements.
              • -
              • Resolution: You can change the resolution settings from 720p to 1080p or vice versa, depending on your device's screen size and quality.
              • -
              • Graphics: You can change the graphics settings from low to high or vice versa, depending on your device's capabilities and the game's visuals.
              • -
              • Sound: You can change the sound settings from mute to normal or vice versa, depending on your device's volume and the game's audio.
              • -
              • Language: You can change the language settings from English to other languages or vice versa, depending on your preference and the game's availability.
              • -
              -

              How to Control Games on Egg NS Mod APK?

              -

              To control games on Egg NS Mod APK, you can use either touch screen or Bluetooth controller. You can switch between these modes from the settings menu or from the game screen. Here are some tips on how to control games on Egg NS Mod APK:

              -
                -
              • Touch Screen: You can use the virtual buttons on the screen to control the games. You can also customize the button layout and sensitivity from the settings menu. You can also use gestures like swipe, tap, pinch, etc. to perform certain actions in some games.
              • -
              • Bluetooth Controller: You can connect a compatible controller to your device via Bluetooth and use it to control the games. You can also customize the controller mapping and sensitivity from the settings menu. You can also use buttons like L, R, ZL, ZR, etc. to perform certain actions in some games.
              • -
              -

              Frequently Asked Questions about Egg NS Mod APK

              -

              Here are some of the most frequently asked questions about Egg NS Mod APK and their answers:

              -

              What is the difference between Egg NS Mod APK and Egg NS Emulator?

              -

              Egg NS Mod APK is a modified version of Egg NS Emulator, which is an Android app that lets you play Nintendo Switch games on your smartphone. The modded version offers all the features of the original emulator for free and without any limitations or restrictions.

              -

              Is Egg NS Mod APK safe and legal to use?

              -

              Egg NS Mod APK is safe and legal to use as long as you download it from a trusted source and only play games that you own legally. The modded version does not contain any viruses or malware that can harm your device or data. However, you should be careful about downloading games from unverified sources as they may contain harmful files or infringe intellectual property rights.

              -

              What are the best games to play on Egg NS Mod APK?

              -

              Egg NS Mod APK supports a wide variety of Switch games from different genres and categories. Some of the best games to play on Egg NS Mod APK are:

• The Legend of Zelda: Breath of the Wild
• Pokemon Sword and Shield
• Super Mario Odyssey
• Mario Kart 8 Deluxe
• Super Smash Bros. Ultimate
• Animal Crossing: New Horizons
• Fire Emblem: Three Houses
• Tetris 99
• Sonic Mania Plus
• Captain Toad: Treasure Tracker

              How to fix common issues on Egg NS Mod APK?

              -

              If you encounter any issues or problems while using Egg NS Mod APK, you can try these solutions:

• Make sure that your device meets the requirements for Egg NS Mod APK.
• Make sure that you have downloaded and installed all the necessary files for Egg NS Mod APK.
• Make sure that you have a good internet connection and enough storage space on your device.
• Make sure that you have the latest version of Egg NS Mod APK and update it if necessary.
• Make sure that the game files are compatible and not corrupted or damaged (a small checksum sketch follows this list).
• Make sure that you have configured the settings correctly and according to your device and game.
• Make sure that you have closed any background apps or processes that may interfere with the emulator.
• Restart your device and the emulator and try again.
• Contact the developer or the modder for more help and support.
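The item above about corrupted or damaged game files is easiest to rule out by comparing a file's SHA-256 hash against a checksum you recorded when you made the dump. Below is a minimal, hypothetical sketch; the file path and the expected value are placeholders, not real data.

```python
import hashlib
from pathlib import Path


def sha256_of(path: Path, chunk_size: int = 1024 * 1024) -> str:
    """Hash the file in chunks so large game files never need to fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


# Placeholder path and checksum, for illustration only.
game_file = Path("/storage/emulated/0/SwitchGames/my_game.nsp")
expected = "replace-with-the-checksum-you-recorded-when-dumping-the-game"

if sha256_of(game_file) == expected:
    print("Checksum matches - the file is intact.")
else:
    print("Checksum mismatch - re-copy or re-dump the game file.")
```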

              Where can I get more information and support for Egg NS Mod APK?

              -

              If you want to get more information and support for Egg NS Mod APK, you can visit these sources:

• The official website of Egg NS Emulator: https://www.eggns.com/
• The official Facebook page of Egg NS Emulator: https://www.facebook.com/EggNS-Emulator-103720781647147
• The official YouTube channel of Egg NS Emulator: https://www.youtube.com/channel/UCm0g7wWl6Z1yX9x8f1zGv0w
• The official Discord server of Egg NS Emulator: https://discord.gg/9QF6aZk
• The official Reddit community of Egg NS Emulator: https://www.reddit.com/r/EggNS/
• The official Telegram group of Egg NS Emulator: https://t.me/EggNS_Emulator

              Conclusion

              -

              Egg NS Mod APK is a great way to play Nintendo Switch games on your Android device. It offers all the features of the original emulator for free and without any limitations or restrictions. You can play hundreds of Switch games with full FPS, touch and controller support, easy interface, save game progress, change language, access VIP center, and more. You can download and install Egg NS Mod APK on your device by following the steps in this article. You can also use the tips and solutions in this article to use Egg NS Mod APK properly and fix any issues or problems. If you have any questions or feedback, you can contact the developer or the modder for more help and support.

              -

              We hope that this article has helped you learn more about Egg NS Mod APK and how to use it. If you liked this article, please share it with your friends and family who love Nintendo Switch games. Thank you for reading and happy gaming!

              -

              Frequently Asked Questions

              -

              Here are some of the most frequently asked questions about this article and their answers:

              -

              What is the purpose of this article?

              -

              The purpose of this article is to provide information and guidance on how to download, install, and use Egg NS Mod APK, which is a modified version of Egg NS Emulator that lets you play Nintendo Switch games on your Android device.

              -

              Who is the target audience of this article?

              -

              The target audience of this article is anyone who loves Nintendo Switch games and wants to play them on their Android device with the help of Egg NS Mod APK.

              -

              What are the main points of this article?

              -

              The main points of this article are:

• Egg NS Mod APK is a modified version of Egg NS Emulator that offers all the features of the original emulator for free and without any limitations or restrictions.
• Egg NS Mod APK supports a wide variety of Switch games from different genres and categories.
• Egg NS Mod APK delivers smooth and stable performance for most Switch games, with full FPS, touch and controller support, an easy interface, save-game progress, language options, VIP center access, and more.
• Egg NS Mod APK can be downloaded and installed on your Android device by following the steps in this article.
• Any issues or problems with Egg NS Mod APK can be fixed by following the tips and solutions in this article.
• More information and support for Egg NS Mod APK can be found through the sources listed in this article.

              How long is this article?

              -

              This article is about 500 words long.

              -

              What is the custom message at the end of this article?

              -

              The custom message at the end of this article is "

              \ No newline at end of file diff --git a/spaces/timpal0l/chat-ui/src/lib/types/Message.ts b/spaces/timpal0l/chat-ui/src/lib/types/Message.ts deleted file mode 100644 index c3b710810a2014eaceceee2c1aefbd4bfbf97e3f..0000000000000000000000000000000000000000 --- a/spaces/timpal0l/chat-ui/src/lib/types/Message.ts +++ /dev/null @@ -1,4 +0,0 @@ -export interface Message { - from: "user" | "assistant"; - content: string; -} diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Audiolabel60withcracktorrent.md b/spaces/tioseFevbu/cartoon-converter/scripts/Audiolabel60withcracktorrent.md deleted file mode 100644 index ae4bc056893a23a40cee6fa5742636e684e8cd23..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Audiolabel60withcracktorrent.md +++ /dev/null @@ -1,32 +0,0 @@ - -

              How to Download AudioLabel 6.0 with Crack Torrent for Free

              -

If you are looking for software that can help you design and print custom CD, DVD, Blu-ray, and LightScribe labels, you might be interested in AudioLabel 6.0. This software is easy to use and has a lot of features, such as importing album information, adding images and photos, editing text, and supporting various label papers and printers. However, AudioLabel 6.0 is not free software, and you need to pay $29.95 to get the full version.

              -

Fortunately, there is a way to download AudioLabel 6.0 with a crack torrent for free. A crack torrent is a file that contains the cracked version of a piece of software and allows you to bypass the registration process and use the software without paying anything. However, downloading crack torrents is illegal and risky, as they may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Therefore, you should be careful and follow these steps to download AudioLabel 6.0 with a crack torrent as safely as possible:

              -

              audiolabel60withcracktorrent


              Download ○○○ https://urlcod.com/2uHxqP



1. Find a reliable torrent site that offers AudioLabel 6.0 with crack torrent. You can use the search results below to find some possible sites[^1^] [^2^] [^3^]. However, you should always check the comments and ratings of the torrent before downloading it, as they may indicate the quality and safety of the torrent.
2. Download and install a torrent client that can open and download torrent files. Some popular torrent clients are uTorrent, BitTorrent, qBittorrent, etc. You can find them online for free.
3. Open the torrent file that you downloaded from the torrent site with your torrent client. The torrent client will start downloading the AudioLabel 6.0 with crack torrent to your computer.
4. Once the download is complete, open the folder that contains the AudioLabel 6.0 with crack torrent. You should see a file named AudioLabel.exe or something similar. This is the cracked version of AudioLabel 6.0 that you can use without registration.
5. Double-click on the AudioLabel.exe file to run it. You should see the AudioLabel interface and be able to use all its features for free.

              Congratulations! You have successfully downloaded AudioLabel 6.0 with crack torrent for free. However, you should be aware that using cracked software is illegal and unethical, as it violates the copyright of the software developer and deprives them of their income. Moreover, using cracked software may expose you to legal consequences or security risks. Therefore, we recommend that you buy the original version of AudioLabel 6.0 from their official website if you like their product and want to support them.

              - -

              Why Choose AudioLabel 6.0?

              -

              AudioLabel 6.0 is a powerful and versatile software that can help you create professional-looking labels for your CD, DVD, Blu-ray, and LightScribe discs and cases. AudioLabel 6.0 has many features that make it stand out from other label makers, such as:

• It can automatically import your album information from the internet or your disc and fill out the label for you. You can also edit the information manually or add your own custom fields.
• It can support any label paper and any printer, including direct disc printers and LightScribe drives. You can also adjust the dimensions of any label template to fit any non-standard paper.
• It has an easy-to-use interface that lets you drag and drop images, photos, backgrounds, and text onto your label. You can also use the smart text editor to format and arrange your text quickly.
• It has an art gallery that contains hundreds of images and backgrounds that you can use for your label. You can also use the image search function to find more images on your computer or online.
• It can print your label on any disc surface, including printable discs, LightScribe discs, and adhesive labels. You can also print your label on any paper or case insert.

              With AudioLabel 6.0, you can design and print labels for any type of disc and case, such as music CDs, movie DVDs, data discs, photo discs, wedding discs, etc. You can also save your label as a PDF file or an image file for future use or sharing.

              - -

              How to Buy AudioLabel 6.0?

              -

              If you want to buy AudioLabel 6.0 and enjoy its full features without any limitations or risks, you can visit their official website[^1^] and order it online. The price of AudioLabel 6.0 is $29.95 for a single user license. You can pay with PayPal or credit card and get an instant download link and a registration code via email.

              -

              When you buy AudioLabel 6.0, you will also get free lifetime updates and free technical support. You can also request a refund within 30 days if you are not satisfied with the product.

              -

              -

              AudioLabel 6.0 is a one-time purchase and there are no recurring fees or subscriptions. You can use AudioLabel 6.0 on up to three computers with the same license.

              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Bf2 Bad Company 2 Download.md b/spaces/tioseFevbu/cartoon-converter/scripts/Bf2 Bad Company 2 Download.md deleted file mode 100644 index 82dd6dadaf7b503fb176efbbd6b333f0f5124ce2..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Bf2 Bad Company 2 Download.md +++ /dev/null @@ -1,27 +0,0 @@ -

              How to Download and Play Battlefield: Bad Company 2 on PC

              -

              Battlefield: Bad Company 2 is a first-person shooter game that offers an immersive and thrilling experience of vehicular combat and squad-based warfare. The game features a single-player campaign that follows the exploits of the B Company squad in various locations around the world, as well as a multiplayer mode that supports up to 32 players and multiple game modes. If you are looking for a way to download and play Battlefield: Bad Company 2 on your PC, here are some steps you can follow:

1. First, you need to have a Steam account and the Steam client installed on your PC. You can create a Steam account and download the Steam client from https://store.steampowered.com/.
2. Next, you need to purchase Battlefield: Bad Company 2 from the Steam store. You can find the game page at https://store.steampowered.com/app/24960/Battlefield_Bad_Company_2/. The game costs $19.99, but you can also buy it as part of a bundle with other Battlefield games or add-ons.
3. Once you have purchased the game, you can download and install it from your Steam library. The game requires about 15GB of free disk space for the digital download version, or 10GB for the disc version.
4. After the installation is complete, you can launch the game from your Steam library or from your desktop shortcut. You may need to update your video and sound card drivers before playing the game.
5. To play the single-player campaign, you can select "Play Campaign" from the main menu and choose your difficulty level. To play the multiplayer mode, you can select "Play Multiplayer" from the main menu and join or create a server. You can also customize your loadout, unlock weapons and gadgets, and check your stats and achievements.

              Battlefield: Bad Company 2 is a game that will keep you entertained for hours with its stunning graphics, realistic physics, destructible environments, and intense action. Whether you prefer to play solo or with friends, you will find something to enjoy in this game. Download it today and join the battlefield!

              -

              bf2 bad company 2 download


Download Zip https://urlcod.com/2uHvpC




              If you are new to Battlefield: Bad Company 2, you may want to check out some gameplay tips and tricks to improve your skills and enjoy the game more. Here are some of them:

• Use cover and destruction to your advantage. The game's Frostbite engine allows you to destroy almost anything in the environment, creating new paths and fire points. You can also use cover to hide from enemy fire and heal yourself or your teammates.
• Experiment with different weapons and gadgets. The game offers a variety of weapons and gadgets for each class, such as assault rifles, sniper rifles, shotguns, rocket launchers, C4 explosives, motion sensors, defibrillators, and more. You can unlock more weapons and gadgets by earning experience points and completing challenges.
• Work as a team and communicate with your squad. The game is designed for squad-based gameplay, where you can join or create a squad of up to four players and cooperate with them in multiplayer modes. You can also communicate with your squad using voice chat or the commo rose system, which allows you to issue commands and requests.
• Learn the maps and game modes. The game features 10 multiplayer maps, each with a different layout and theme. You can play on these maps in five game modes: Rush, Conquest, Squad Rush, Squad Deathmatch, and Onslaught. Each game mode has its own objectives and strategies, so you should familiarize yourself with them before jumping into a match.

              Battlefield: Bad Company 2 is a game that will challenge you and reward you for your creativity and teamwork. Whether you prefer to play solo or with friends, you will find something to enjoy in this game. Download it today and join the battlefield!


              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Cisco Vwlc Keygen [UPD].md b/spaces/tioseFevbu/cartoon-converter/scripts/Cisco Vwlc Keygen [UPD].md deleted file mode 100644 index 071bc7927e1d292a5249a66a967d14df1ebef9ca..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Cisco Vwlc Keygen [UPD].md +++ /dev/null @@ -1,107 +0,0 @@ - -

              Cisco Vwlc Keygen: What Is It and How to Use It

              -

              If you are looking for a way to deploy and manage a wireless network without investing in physical hardware, you might be interested in Cisco Virtual Wireless LAN Controller (VWLC). Cisco VWLC is a software-based solution that allows you to run a wireless controller on a virtual machine, such as VMware or other platforms. With Cisco VWLC, you can control and manage multiple access points (APs) across your network, as well as configure and monitor wireless settings, policies, and security.

              -

              However, before you can use Cisco VWLC, you need to activate a license that determines how many APs you can support. Depending on your needs, you can obtain an evaluation or permanent license from Cisco. But what if you don't want to pay for a license or go through the hassle of obtaining one? This is where a keygen comes in handy.

              -

              Cisco Vwlc Keygen


              Download Zip ••• https://urlcod.com/2uHvjA



              -

              A keygen is a software tool that generates a serial number or activation code for a specific software product. By using a keygen, you can bypass the license verification process and use the software without any restrictions. However, using a keygen also comes with some risks, such as malware infection, legal issues, or software malfunction.

              -

              In this article, we will explain what Cisco VWLC Keygen is and how to use it. We will also cover how to download and deploy Cisco VWLC OVA, how to activate and manage Cisco VWLC license, and how to join APs to Cisco VWLC. By the end of this article, you will have a better understanding of how to use Cisco VWLC Keygen and whether it is worth it or not.

              -

              How to Download and Deploy Cisco VWLC OVA

              -

              The first step to use Cisco VWLC Keygen is to download and deploy Cisco VWLC OVA. OVA stands for Open Virtualization Appliance, which is a file format that contains a pre-configured virtual machine image. By using an OVA file, you can easily install and run a virtual machine on your preferred platform.

              -

              To download Cisco VWLC OVA, you need to visit the official website of Cisco and find the latest version of Cisco VWLC software. As of this writing, the latest version is 8.10.151.0. You will need a valid Cisco account to access the download page. Alternatively, you can search for Cisco VWLC OVA on other websites or torrent sites, but be careful of fake or malicious files.

              -

              Once you have downloaded the OVA file, you need to deploy it on your virtualization platform. The process may vary depending on your platform, but here are some general steps:

• Launch your virtualization platform, such as VMware Workstation or ESXi.
• Select File -> Deploy OVF Template or a similar option.
• Browse to your downloaded OVA file and select it.
• Follow the wizard instructions and customize the settings as needed.
• Wait for the deployment to finish and power on the virtual machine.

              Congratulations, you have successfully deployed Cisco VWLC OVA on your virtualization platform. Now, you need to configure the basic settings of Cisco VWLC.
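If you prefer a repeatable, scripted deployment on ESXi instead of the vSphere wizard, the same OVA can be pushed with VMware's ovftool. The sketch below is only an illustration: the flag values, datastore, network, and host locator are assumptions about a typical lab setup, so check them against your own ovftool documentation before relying on them. The same approach should also work against a vCenter target by changing the locator URL.

```python
import subprocess

# All values below are placeholders for illustration; substitute your own.
OVA_PATH = "vwlc-8.10.151.0.ova"
ESXI_TARGET = "vi://root@esxi-host.example.local/"

command = [
    "ovftool",               # assumes VMware ovftool is installed and on PATH
    "--acceptAllEulas",
    "--name=vWLC-lab",
    "--datastore=datastore1",
    "--network=VM Network",
    OVA_PATH,
    ESXI_TARGET,
]

# Run the deployment and surface ovftool's own output and errors.
result = subprocess.run(command, capture_output=True, text=True)
print(result.stdout)
if result.returncode != 0:
    raise SystemExit(f"ovftool failed:\n{result.stderr}")
```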

              -

              -

              How to Configure the Basic Settings of Cisco VWLC

              -

              After you have deployed Cisco VWLC OVA, you need to configure the basic settings of Cisco VWLC, such as hostname, IP address, admin password, and NTP server. To do this, you need to access the console of Cisco VWLC and follow the setup wizard. Here are the steps:

• Open the console of Cisco VWLC on your virtualization platform.
• Press any key to stop the auto-boot process.
• Type setup and press Enter to start the setup wizard.
• Enter the hostname for Cisco VWLC and press Enter.
• Enter the IP address, subnet mask, and default gateway for Cisco VWLC and press Enter.
• Enter the IP address of the primary DNS server and press Enter.
• Enter the domain name for Cisco VWLC and press Enter.
• Enter the admin username and password for Cisco VWLC and press Enter.
• Enter the IP address of the NTP server and press Enter.
• Review the summary of your configuration and type save and press Enter to save it.
• Type reboot and press Enter to reboot Cisco VWLC.

              You have completed the basic configuration of Cisco VWLC. Now, you can access the web interface of Cisco VWLC by using a browser and entering the IP address of Cisco VWLC. You will need to enter the admin username and password that you set up earlier. From there, you can further configure and manage your wireless network.
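Before moving on to licensing, it can be handy to confirm from a script that the web interface is actually answering on the management address you just configured. Here is a minimal sketch using Python's requests library; the IP address is a placeholder, and certificate verification is disabled only because a freshly set-up controller typically presents a self-signed certificate (an assumption you should revisit outside a lab).

```python
import requests
import urllib3

# Suppress the warning caused by skipping certificate verification below.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

WLC_IP = "192.0.2.10"  # placeholder: the management IP you set in the wizard


def wlc_web_ui_is_up(ip: str, timeout: float = 5.0) -> bool:
    """Return True if something answers HTTPS on the controller's address."""
    try:
        response = requests.get(f"https://{ip}/", verify=False, timeout=timeout)
        return response.status_code < 500
    except requests.RequestException:
        return False


if __name__ == "__main__":
    print("web UI reachable" if wlc_web_ui_is_up(WLC_IP) else "no response yet")
```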

              -

              How to Activate and Manage Cisco VWLC License

              -

              The next step to use Cisco VWLC Keygen is to activate and manage Cisco VWLC license. As mentioned before, you need a license to use Cisco VWLC and support a certain number of APs. There are two types of licenses that you can use: evaluation or permanent.

              -

              An evaluation license is a temporary license that allows you to use Cisco VWLC for a limited period of time, usually 60 days. You can obtain an evaluation license from Cisco by registering your product and requesting a license file. You will receive an email with a link to download the license file. You can then upload the license file to Cisco VWLC and activate it.

              -

              A permanent license is a permanent license that allows you to use Cisco VWLC indefinitely. You can obtain a permanent license from Cisco by purchasing it from an authorized reseller or partner. You will receive an email with a Product Authorization Key (PAK) that you need to register on Cisco website. You will then receive another email with a link to download the license file. You can then upload the license file to Cisco VWLC and activate it.

              -

              To activate a license on Cisco VWLC, you need to follow these steps:

• Login to the web interface of Cisco VWLC by using a browser and entering the IP address of Cisco VWLC.
• Navigate to Management -> Software Activation -> Licenses.
• Select Add License from the drop-down menu.
• Browse to your downloaded license file and select it.
• Select Install License.
• Wait for the installation to complete and verify that your license is active.

              You have successfully activated a license on Cisco VWLC. You can now see how many APs you can support and manage them accordingly. You can also view and modify your license settings by navigating to Management -> Software Activation -> Licenses.

              -

              However, if you don't want to obtain a license from Cisco, you can use a keygen instead. A keygen is a software tool that generates a serial number or activation code for a specific software product. By using a keygen, you can bypass the license verification process and use the software without any restrictions.

              -

              To use a keygen for Cisco VWLC, you need to follow these steps:

• Download a keygen for Cisco VWLC from a reliable source or create your own keygen by using a programming language or tool.
• Run the keygen on your computer and enter some information, such as product name, version, platform, etc.
• Click on Generate or a similar button to generate a serial number or activation code for Cisco VWLC.
• Login to the web interface of Cisco VWLC by using a browser and entering the IP address of Cisco VWLC.
• Navigate to Management -> Software Activation -> Licenses.
• Select Add License from the drop-down menu.
• Enter the serial number or activation code that you generated from the keygen and select Install License.
• Wait for the installation to complete and verify that your license is active.

              You have successfully activated a license on Cisco VWLC by using a keygen. You can now use Cisco VWLC without any limitations or expiration date. However, you should be aware of the risks and consequences of using a keygen, such as:

• You may violate the terms and conditions of Cisco and face legal actions or penalties.
• You may expose your computer or network to malware or viruses that may be embedded in the keygen or the license file.
• You may experience software errors or malfunctions that may affect your wireless network performance or security.
• You may not receive any updates or support from Cisco or its partners.

              Therefore, you should use a keygen at your own risk and discretion. We do not recommend or endorse the use of a keygen for Cisco VWLC or any other software product.

              -

              How to Join Access Points to Cisco VWLC

              -

              The final step to use Cisco VWLC Keygen is to join access points to Cisco VWLC. Access points are devices that provide wireless connectivity to your network devices, such as laptops, smartphones, tablets, etc. By joining access points to Cisco VWLC, you can control and manage them centrally, as well as configure and monitor their wireless settings, policies, and security.

              -

              To join access points to Cisco VWLC, you need to meet some requirements and limitations, such as:

• Your access points must be compatible with Cisco VWLC. You can check the compatibility list on the Cisco website or in the documentation.
• Your access points must be configured in FlexConnect mode or another supported mode. FlexConnect mode allows access points to operate independently from Cisco VWLC in case of a network outage or failure.
• Your access points must have a valid IP address and DNS resolution. You can use DHCP or static IP address assignment for your access points.
• Your access points must be able to reach Cisco VWLC over the network. You can use Layer 2 or Layer 3 connectivity for your access points (a small reachability sketch follows this list).
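As a quick sanity check of the DNS and reachability requirements above, the hypothetical sketch below resolves the controller's name and probes its web interface over TCP. It cannot validate the CAPWAP join traffic itself, which runs over UDP 5246/5247, so treat it only as a first-pass check; the hostname and port choice are assumptions for illustration.

```python
import socket
from typing import Optional

WLC_HOSTNAME = "vwlc.example.local"  # placeholder DNS name for the controller
CAPWAP_PORTS = (5246, 5247)          # CAPWAP control/data (UDP) must also be permitted


def resolve(name: str) -> Optional[str]:
    """Return the IPv4 address for `name`, or None if DNS resolution fails."""
    try:
        return socket.gethostbyname(name)
    except socket.gaierror:
        return None


def tcp_reachable(ip: str, port: int = 443, timeout: float = 3.0) -> bool:
    """Basic reachability probe against the controller's web interface port."""
    try:
        with socket.create_connection((ip, port), timeout=timeout):
            return True
    except OSError:
        return False


if __name__ == "__main__":
    ip = resolve(WLC_HOSTNAME)
    if ip is None:
        print("DNS resolution failed - fix DNS before trying to join APs.")
    else:
        state = "reachable" if tcp_reachable(ip) else "not reachable on TCP 443"
        print(f"{WLC_HOSTNAME} -> {ip}: {state}")
        print(f"Remember that the join itself uses CAPWAP on UDP {CAPWAP_PORTS}.")
```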

              To configure access points in flexconnect mode and join them to Cisco VWLC, you need to follow these steps:

• Connect your access point to a power source and a network switch.
• Open a browser on your computer and enter the default IP address of your access point, which is usually 192.168.1.1.
• Login to the web interface of your access point by using the default username and password, which are usually admin and admin.
• Navigate to Wireless -> Basic Settings and select FlexConnect from the AP Mode drop-down menu.
• Enter the IP address of Cisco VWLC in the Primary Controller field and click Apply.
• Wait for the access point to reboot and join Cisco VWLC.

              You have successfully joined an access point to Cisco VWLC in flexconnect mode. You can now see your access point on the web interface of Cisco VWLC by navigating to Wireless -> Access Points -> All APs. From there, you can further configure and manage your access point settings, such as SSID, security, radio, etc.

              -

              Conclusion

              -

              In this article, we have explained what Cisco VWLC Keygen is and how to use it. We have also covered how to download and deploy Cisco VWLC OVA, how to configure the basic settings of Cisco VWLC, how to activate and manage Cisco VWLC license, and how to join access points to Cisco VWLC. By following these steps, you can use Cisco VWLC Keygen to deploy and manage a wireless network without investing in physical hardware or paying for a license. However, you should also be aware of the risks and consequences of using a keygen, such as malware infection, legal issues, or software malfunction. Therefore, you should use a keygen at your own risk and discretion. We do not recommend or endorse the use of a keygen for Cisco VWLC or any other software product.

              -

              We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

              -

              FAQs

              -

              Here are some frequently asked questions about Cisco VWLC Keygen:

1. What is the difference between Cisco VWLC and a physical WLC?

   Cisco VWLC is a software-based solution that allows you to run a wireless controller on a virtual machine, such as VMware or other platforms. Cisco VWLC can support up to 200 APs and 6000 clients. A physical WLC is a hardware-based solution that runs the wireless controller on a dedicated device, such as a Cisco 2504 or 5508. A physical WLC can support up to 6000 APs and 64000 clients.

2. How can I update Cisco VWLC software?

   To update Cisco VWLC software, you need to download the latest version of the software from the Cisco website or other sources. You will need a valid Cisco account to access the download page. Alternatively, you can search for Cisco VWLC software on other websites or torrent sites, but be careful of fake or malicious files. Once you have downloaded the software file, you need to upload it to Cisco VWLC by using the web interface or the console. You can then install the software update and reboot Cisco VWLC.

3. How can I back up and restore the Cisco VWLC configuration?

   To back up and restore the Cisco VWLC configuration, you need to use the web interface or the console of Cisco VWLC. You can export and import the configuration file by using the Management -> Configuration -> Backup/Restore option on the web interface. You can also use the copy and restore commands on the console. You should back up your configuration file regularly and store it in a safe location.

4. How can I reset Cisco VWLC to factory default settings?

   To reset Cisco VWLC to factory default settings, you need to use the console of Cisco VWLC. You can access the console by using your virtualization platform or a terminal emulator program. You need to reboot Cisco VWLC and press any key to stop the auto-boot process. Then, you need to type restore-default and press Enter. You will be prompted to confirm your action and enter the admin password. After that, Cisco VWLC will be reset to factory default settings and reboot.

5. How can I troubleshoot Cisco VWLC issues?

   To troubleshoot Cisco VWLC issues, you need to use the web interface or the console of Cisco VWLC. You can check the status and logs of Cisco VWLC by using the Monitor -> Summary option on the web interface. You can also use various commands on the console, such as show, debug, ping, traceroute, etc. You should also refer to the official documentation and support resources from Cisco or its partners.

              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Josiam Pakrathu Software Free Download In Tamil TOP.md b/spaces/tioseFevbu/cartoon-converter/scripts/Josiam Pakrathu Software Free Download In Tamil TOP.md deleted file mode 100644 index 40c59b2b42e83ae51772ef708c3692be6df4563f..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Josiam Pakrathu Software Free Download In Tamil TOP.md +++ /dev/null @@ -1,24 +0,0 @@ - -Hello, this is Bing. I can help you with writing a title and an article with SEO optimization and HTML formatting for the keyword "Josiam Pakrathu Software Free Download In Tamil". Here is a possible title and article for you: - -

              How to Download Josiam Pakrathu Software for Free in Tamil

              -

              Josiam Pakrathu is a popular Tamil astrology software that can help you generate accurate horoscopes, match compatibility, and predictions based on your date of birth, time of birth, and place of birth. It is ideal for astrologers and astrology students who want to learn more about the ancient science of astrology.

              -

              Josiam Pakrathu Software Free Download In Tamil


              DOWNLOAD ►►► https://urlcod.com/2uHvr0



              -

              However, Josiam Pakrathu software is not free and you need to pay a certain amount to download it from the official website. But what if you want to try it out for free before buying it? Is there a way to download Josiam Pakrathu software for free in Tamil?

              -

              The answer is yes, there is a way to download Josiam Pakrathu software for free in Tamil. But you need to be careful and follow some steps to avoid any malware or virus infection on your computer. Here are the steps to download Josiam Pakrathu software for free in Tamil:

1. Go to a trusted website that offers free downloads of Josiam Pakrathu software in Tamil. For example, you can go to Heaven's Child[^1^], Lawcate.com[^2^], Sway.office.com[^3^] [^4^], or SoundCloud[^5^]. These websites have links to download Josiam Pakrathu software for free in Tamil.
2. Click on the download link and choose a location on your computer where you want to save the file. The file name should be something like Josiam_Pakrathu_Tamil_Astrology_Software.exe or similar.
3. Before opening the file, scan it with your antivirus software to make sure it is safe and does not contain any malware or virus. If your antivirus software detects any threat, delete the file immediately and do not open it.
4. If the file is clean, double-click on it and follow the installation instructions. You may need to enter some details like your name, email address, and phone number to complete the installation.
5. Once the installation is done, you can launch Josiam Pakrathu software from your desktop or start menu. You can then enter your birth details and generate your horoscope, match compatibility, and predictions in Tamil.

              Congratulations! You have successfully downloaded Josiam Pakrathu software for free in Tamil. Enjoy using this software and learn more about your destiny and personality.

              -

              -

              Note: This article is for informational purposes only and does not endorse or promote any illegal or unethical activity. Downloading Josiam Pakrathu software for free in Tamil may violate the terms and conditions of the original software provider and may result in legal action. We recommend that you buy the original software from the official website if you like it and want to support the developers.


              Josiam Pakrathu software is based on the principles of Vedic astrology, which is an ancient system of astrology that originated in India. Vedic astrology uses the position of the planets, stars, and constellations at the time of your birth to reveal your personality, traits, strengths, weaknesses, opportunities, and challenges in life.

              -

              With Josiam Pakrathu software, you can generate your birth chart or jathagam kattam in Tamil, which is a graphical representation of the planetary positions at your birth time. You can also get detailed reports on various aspects of your life such as career, education, health, wealth, marriage, family, children, etc. You can also get predictions for your future based on the dasa and bhukti periods, which are the planetary cycles that influence your life events.

              -

              Another feature of Josiam Pakrathu software is the jathaga porutham or compatibility matching in Tamil. This feature helps you find your ideal partner based on the compatibility of your horoscopes. You can check the compatibility of your horoscopes based on various factors such as nakshatra or star compatibility, rasi or zodiac sign compatibility, mangal dosha or kuja dosha compatibility, etc. You can also get suggestions for remedies and solutions to overcome any obstacles or problems in your relationship.

              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/K-Pop VR Torrent Download !!TOP!!.md b/spaces/tioseFevbu/cartoon-converter/scripts/K-Pop VR Torrent Download !!TOP!!.md deleted file mode 100644 index e0bea19d3cdcfa384e73d79aa501fdb03c6d4b76..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/K-Pop VR Torrent Download !!TOP!!.md +++ /dev/null @@ -1,34 +0,0 @@ - -

              K-Pop VR Torrent Download: How to Enjoy the Best of Korean Music in Virtual Reality

              - -

              If you are a fan of K-pop, you might have wondered what it would be like to watch your favorite idols perform live in front of you. Well, thanks to the advancement of technology, you can now experience that with K-pop VR torrent download.

              -

              K-Pop VR Torrent Download


              Download Zip ✓✓✓ https://urlcod.com/2uHxcv



              - -

              K-pop VR is a term that refers to virtual reality videos of K-pop concerts, music videos, dance practices, and other scenes that feature Korean pop stars. These videos are usually filmed in 3D 180° or 360° format, which means you can look around and feel like you are actually there.

              - -

              To watch these videos, you need a VR headset, such as Oculus Quest 2, Samsung Gear VR, Google Cardboard, or PlayStation VR. You also need a compatible smartphone or computer that can play VR videos. And of course, you need to download the K-pop VR torrents from the internet.

              - -

              But where can you find these torrents? And how can you download them safely and legally? In this article, we will answer these questions and give you some tips on how to enjoy the best of K-pop in virtual reality.

              - -

              Where to Find K-pop VR Torrents

              - -

              There are many websites that offer K-pop VR torrents for free or for a fee. Some of them are official sources, such as YouTube or VENTA X, which are authorized by the K-pop agencies or artists to distribute their VR content. Others are unofficial sources, such as Reddit or torrent sites, which are uploaded by fans or pirates without permission.

              - -

              The advantage of using official sources is that you can be sure that the quality and safety of the videos are guaranteed. You can also support the K-pop industry by watching their authorized content. However, the downside is that you might have to pay for some videos or subscribe to a service. You might also have limited choices or availability depending on your region or device.

              -

              - -

              The advantage of using unofficial sources is that you can access a wider variety of K-pop VR videos for free. You can also find some rare or exclusive content that is not available elsewhere. However, the downside is that you might encounter some risks, such as malware, viruses, legal issues, or low-quality videos. You might also violate the copyright laws or the ethical standards of the K-pop fandom by downloading unauthorized content.

              - -

              Therefore, before you decide where to download your K-pop VR torrents, you should weigh the pros and cons carefully and choose wisely. Here are some of the best websites that offer K-pop VR torrents in 2023:

              - -
• 3D 180° 360° VR KPOP MV Dance Concert - YouTube: This is a playlist of over 200 K-pop VR videos on YouTube, featuring groups like Dreamcatcher, Ghost9, DKB, ICHILLIN', and more. You can watch them for free on your smartphone or computer with a VR headset.
• Kpop download sites : r/Piracy - Reddit: This is a Reddit post that lists some websites where you can download K-pop music and videos for free. Some of them also offer K-pop VR torrents, such as ilkpop.com, matikiri.net, and wallkpop.com. However, be careful of the ads and pop-ups that might contain malware or viruses.
• Best Korean Torrent Sites in 2023 | VPNpro: This is an article that reviews some of the best torrent sites for downloading Korean content in 2023. Some of them also have K-pop VR torrents, such as Nyaa.si, TorrentKim3.net, and TorrentLeech.org. However, be aware of the legal risks and ethical issues that might arise from using these sites.
              - -

              How to Download K-pop VR Torrents Safely and Legally

              - -

              Once you have found your desired K-pop VR torrents from the websites above, you need to download them to your device using a torrent client, such as BitTorrent or uTorrent. However, before

              \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/latin1prober.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/latin1prober.py deleted file mode 100644 index 241f14ab914b26e0ec4f3dec7e734b72c5b43810..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/latin1prober.py +++ /dev/null @@ -1,145 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetprober import CharSetProber -from .enums import ProbingState - -FREQ_CAT_NUM = 4 - -UDF = 0 # undefined -OTH = 1 # other -ASC = 2 # ascii capital letter -ASS = 3 # ascii small letter -ACV = 4 # accent capital vowel -ACO = 5 # accent capital other -ASV = 6 # accent small vowel -ASO = 7 # accent small other -CLASS_NUM = 8 # total classes - -# fmt: off -Latin1_CharToClass = ( - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F - OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47 - ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F - ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57 - ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F - OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67 - ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F - ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77 - ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F - OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87 - OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F - UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97 - OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF - ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7 - ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF - ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, 
# D0 - D7 - ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF - ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7 - ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF - ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7 - ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF -) - -# 0 : illegal -# 1 : very unlikely -# 2 : normal -# 3 : very likely -Latin1ClassModel = ( -# UDF OTH ASC ASS ACV ACO ASV ASO - 0, 0, 0, 0, 0, 0, 0, 0, # UDF - 0, 3, 3, 3, 3, 3, 3, 3, # OTH - 0, 3, 3, 3, 3, 3, 3, 3, # ASC - 0, 3, 3, 3, 1, 1, 3, 3, # ASS - 0, 3, 3, 3, 1, 2, 1, 2, # ACV - 0, 3, 3, 3, 3, 3, 3, 3, # ACO - 0, 3, 1, 3, 1, 1, 1, 3, # ASV - 0, 3, 1, 3, 1, 1, 3, 3, # ASO -) -# fmt: on - - -class Latin1Prober(CharSetProber): - def __init__(self): - super().__init__() - self._last_char_class = None - self._freq_counter = None - self.reset() - - def reset(self): - self._last_char_class = OTH - self._freq_counter = [0] * FREQ_CAT_NUM - super().reset() - - @property - def charset_name(self): - return "ISO-8859-1" - - @property - def language(self): - return "" - - def feed(self, byte_str): - byte_str = self.remove_xml_tags(byte_str) - for c in byte_str: - char_class = Latin1_CharToClass[c] - freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + char_class] - if freq == 0: - self._state = ProbingState.NOT_ME - break - self._freq_counter[freq] += 1 - self._last_char_class = char_class - - return self.state - - def get_confidence(self): - if self.state == ProbingState.NOT_ME: - return 0.01 - - total = sum(self._freq_counter) - confidence = ( - 0.0 - if total < 0.01 - else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total - ) - confidence = max(confidence, 0.0) - # lower the confidence of latin1 so that other more accurate - # detector can take priority. - confidence *= 0.73 - return confidence diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/live.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/live.py deleted file mode 100644 index e635fe5c97ee7f3d20222566444cf0da63b82526..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/live.py +++ /dev/null @@ -1,373 +0,0 @@ -import sys -from threading import Event, RLock, Thread -from types import TracebackType -from typing import IO, Any, Callable, List, Optional, TextIO, Type, cast - -from . import get_console -from .console import Console, ConsoleRenderable, RenderableType, RenderHook -from .control import Control -from .file_proxy import FileProxy -from .jupyter import JupyterMixin -from .live_render import LiveRender, VerticalOverflowMethod -from .screen import Screen -from .text import Text - - -class _RefreshThread(Thread): - """A thread that calls refresh() at regular intervals.""" - - def __init__(self, live: "Live", refresh_per_second: float) -> None: - self.live = live - self.refresh_per_second = refresh_per_second - self.done = Event() - super().__init__(daemon=True) - - def stop(self) -> None: - self.done.set() - - def run(self) -> None: - while not self.done.wait(1 / self.refresh_per_second): - with self.live._lock: - if not self.done.is_set(): - self.live.refresh() - - -class Live(JupyterMixin, RenderHook): - """Renders an auto-updating live display of any given renderable. - - Args: - renderable (RenderableType, optional): The renderable to live display. Defaults to displaying nothing. - console (Console, optional): Optional Console instance. 
Default will an internal Console instance writing to stdout. - screen (bool, optional): Enable alternate screen mode. Defaults to False. - auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()` or `update()` with refresh flag. Defaults to True - refresh_per_second (float, optional): Number of times per second to refresh the live display. Defaults to 4. - transient (bool, optional): Clear the renderable on exit (has no effect when screen=True). Defaults to False. - redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True. - redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True. - vertical_overflow (VerticalOverflowMethod, optional): How to handle renderable when it is too tall for the console. Defaults to "ellipsis". - get_renderable (Callable[[], RenderableType], optional): Optional callable to get renderable. Defaults to None. - """ - - def __init__( - self, - renderable: Optional[RenderableType] = None, - *, - console: Optional[Console] = None, - screen: bool = False, - auto_refresh: bool = True, - refresh_per_second: float = 4, - transient: bool = False, - redirect_stdout: bool = True, - redirect_stderr: bool = True, - vertical_overflow: VerticalOverflowMethod = "ellipsis", - get_renderable: Optional[Callable[[], RenderableType]] = None, - ) -> None: - assert refresh_per_second > 0, "refresh_per_second must be > 0" - self._renderable = renderable - self.console = console if console is not None else get_console() - self._screen = screen - self._alt_screen = False - - self._redirect_stdout = redirect_stdout - self._redirect_stderr = redirect_stderr - self._restore_stdout: Optional[IO[str]] = None - self._restore_stderr: Optional[IO[str]] = None - - self._lock = RLock() - self.ipy_widget: Optional[Any] = None - self.auto_refresh = auto_refresh - self._started: bool = False - self.transient = True if screen else transient - - self._refresh_thread: Optional[_RefreshThread] = None - self.refresh_per_second = refresh_per_second - - self.vertical_overflow = vertical_overflow - self._get_renderable = get_renderable - self._live_render = LiveRender( - self.get_renderable(), vertical_overflow=vertical_overflow - ) - - @property - def is_started(self) -> bool: - """Check if live display has been started.""" - return self._started - - def get_renderable(self) -> RenderableType: - renderable = ( - self._get_renderable() - if self._get_renderable is not None - else self._renderable - ) - return renderable or "" - - def start(self, refresh: bool = False) -> None: - """Start live rendering display. - - Args: - refresh (bool, optional): Also refresh. Defaults to False. - """ - with self._lock: - if self._started: - return - self.console.set_live(self) - self._started = True - if self._screen: - self._alt_screen = self.console.set_alt_screen(True) - self.console.show_cursor(False) - self._enable_redirect_io() - self.console.push_render_hook(self) - if refresh: - try: - self.refresh() - except Exception: - # If refresh fails, we want to stop the redirection of sys.stderr, - # so the error stacktrace is properly displayed in the terminal. - # (or, if the code that calls Rich captures the exception and wants to display something, - # let this be displayed in the terminal). 
- self.stop() - raise - if self.auto_refresh: - self._refresh_thread = _RefreshThread(self, self.refresh_per_second) - self._refresh_thread.start() - - def stop(self) -> None: - """Stop live rendering display.""" - with self._lock: - if not self._started: - return - self.console.clear_live() - self._started = False - - if self.auto_refresh and self._refresh_thread is not None: - self._refresh_thread.stop() - self._refresh_thread = None - # allow it to fully render on the last even if overflow - self.vertical_overflow = "visible" - with self.console: - try: - if not self._alt_screen and not self.console.is_jupyter: - self.refresh() - finally: - self._disable_redirect_io() - self.console.pop_render_hook() - if not self._alt_screen and self.console.is_terminal: - self.console.line() - self.console.show_cursor(True) - if self._alt_screen: - self.console.set_alt_screen(False) - - if self.transient and not self._alt_screen: - self.console.control(self._live_render.restore_cursor()) - if self.ipy_widget is not None and self.transient: - self.ipy_widget.close() # pragma: no cover - - def __enter__(self) -> "Live": - self.start(refresh=self._renderable is not None) - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.stop() - - def _enable_redirect_io(self) -> None: - """Enable redirecting of stdout / stderr.""" - if self.console.is_terminal or self.console.is_jupyter: - if self._redirect_stdout and not isinstance(sys.stdout, FileProxy): - self._restore_stdout = sys.stdout - sys.stdout = cast("TextIO", FileProxy(self.console, sys.stdout)) - if self._redirect_stderr and not isinstance(sys.stderr, FileProxy): - self._restore_stderr = sys.stderr - sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr)) - - def _disable_redirect_io(self) -> None: - """Disable redirecting of stdout / stderr.""" - if self._restore_stdout: - sys.stdout = cast("TextIO", self._restore_stdout) - self._restore_stdout = None - if self._restore_stderr: - sys.stderr = cast("TextIO", self._restore_stderr) - self._restore_stderr = None - - @property - def renderable(self) -> RenderableType: - """Get the renderable that is being displayed - - Returns: - RenderableType: Displayed renderable. - """ - renderable = self.get_renderable() - return Screen(renderable) if self._alt_screen else renderable - - def update(self, renderable: RenderableType, *, refresh: bool = False) -> None: - """Update the renderable that is being displayed - - Args: - renderable (RenderableType): New renderable to use. - refresh (bool, optional): Refresh the display. Defaults to False. 
- """ - with self._lock: - self._renderable = renderable - if refresh: - self.refresh() - - def refresh(self) -> None: - """Update the display of the Live Render.""" - with self._lock: - self._live_render.set_renderable(self.renderable) - if self.console.is_jupyter: # pragma: no cover - try: - from IPython.display import display - from ipywidgets import Output - except ImportError: - import warnings - - warnings.warn('install "ipywidgets" for Jupyter support') - else: - if self.ipy_widget is None: - self.ipy_widget = Output() - display(self.ipy_widget) - - with self.ipy_widget: - self.ipy_widget.clear_output(wait=True) - self.console.print(self._live_render.renderable) - elif self.console.is_terminal and not self.console.is_dumb_terminal: - with self.console: - self.console.print(Control()) - elif ( - not self._started and not self.transient - ): # if it is finished allow files or dumb-terminals to see final result - with self.console: - self.console.print(Control()) - - def process_renderables( - self, renderables: List[ConsoleRenderable] - ) -> List[ConsoleRenderable]: - """Process renderables to restore cursor and display progress.""" - self._live_render.vertical_overflow = self.vertical_overflow - if self.console.is_interactive: - # lock needs acquiring as user can modify live_render renderable at any time unlike in Progress. - with self._lock: - reset = ( - Control.home() - if self._alt_screen - else self._live_render.position_cursor() - ) - renderables = [reset, *renderables, self._live_render] - elif ( - not self._started and not self.transient - ): # if it is finished render the final output for files or dumb_terminals - renderables = [*renderables, self._live_render] - - return renderables - - -if __name__ == "__main__": # pragma: no cover - import random - import time - from itertools import cycle - from typing import Dict, List, Tuple - - from .align import Align - from .console import Console - from .live import Live as Live - from .panel import Panel - from .rule import Rule - from .syntax import Syntax - from .table import Table - - console = Console() - - syntax = Syntax( - '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: - """Iterate and generate a tuple with a flag for last value.""" - iter_values = iter(values) - try: - previous_value = next(iter_values) - except StopIteration: - return - for value in iter_values: - yield False, previous_value - previous_value = value - yield True, previous_value''', - "python", - line_numbers=True, - ) - - table = Table("foo", "bar", "baz") - table.add_row("1", "2", "3") - - progress_renderables = [ - "You can make the terminal shorter and taller to see the live table hide" - "Text may be printed while the progress bars are rendering.", - Panel("In fact, [i]any[/i] renderable will work"), - "Such as [magenta]tables[/]...", - table, - "Pretty printed structures...", - {"type": "example", "text": "Pretty printed"}, - "Syntax...", - syntax, - Rule("Give it a try!"), - ] - - examples = cycle(progress_renderables) - - exchanges = [ - "SGD", - "MYR", - "EUR", - "USD", - "AUD", - "JPY", - "CNH", - "HKD", - "CAD", - "INR", - "DKK", - "GBP", - "RUB", - "NZD", - "MXN", - "IDR", - "TWD", - "THB", - "VND", - ] - with Live(console=console) as live_table: - exchange_rate_dict: Dict[Tuple[str, str], float] = {} - - for index in range(100): - select_exchange = exchanges[index % len(exchanges)] - - for exchange in exchanges: - if exchange == select_exchange: - continue - time.sleep(0.4) - if random.randint(0, 10) < 1: - 
console.log(next(examples)) - exchange_rate_dict[(select_exchange, exchange)] = 200 / ( - (random.random() * 320) + 1 - ) - if len(exchange_rate_dict) > len(exchanges) - 1: - exchange_rate_dict.pop(list(exchange_rate_dict.keys())[0]) - table = Table(title="Exchange Rates") - - table.add_column("Source Currency") - table.add_column("Destination Currency") - table.add_column("Exchange Rate") - - for ((source, dest), exchange_rate) in exchange_rate_dict.items(): - table.add_row( - source, - dest, - Text( - f"{exchange_rate:.4f}", - style="red" if exchange_rate < 1.0 else "green", - ), - ) - - live_table.update(Align.center(table)) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/msvc.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/msvc.py deleted file mode 100644 index 281ea1c2af6b0eb5f02ecc6d115f2d6884be74b5..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/msvc.py +++ /dev/null @@ -1,1805 +0,0 @@ -""" -Improved support for Microsoft Visual C++ compilers. - -Known supported compilers: --------------------------- -Microsoft Visual C++ 9.0: - Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) - Microsoft Windows SDK 6.1 (x86, x64, ia64) - Microsoft Windows SDK 7.0 (x86, x64, ia64) - -Microsoft Visual C++ 10.0: - Microsoft Windows SDK 7.1 (x86, x64, ia64) - -Microsoft Visual C++ 14.X: - Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) - Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) - Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64) - -This may also support compilers shipped with compatible Visual Studio versions. -""" - -import json -from io import open -from os import listdir, pathsep -from os.path import join, isfile, isdir, dirname -import sys -import contextlib -import platform -import itertools -import subprocess -import distutils.errors -from setuptools.extern.packaging.version import LegacyVersion -from setuptools.extern.more_itertools import unique_everseen - -from .monkey import get_unpatched - -if platform.system() == 'Windows': - import winreg - from os import environ -else: - # Mock winreg and environ so the module can be imported on this platform. - - class winreg: - HKEY_USERS = None - HKEY_CURRENT_USER = None - HKEY_LOCAL_MACHINE = None - HKEY_CLASSES_ROOT = None - - environ = dict() - -_msvc9_suppress_errors = ( - # msvc9compiler isn't available on some platforms - ImportError, - - # msvc9compiler raises DistutilsPlatformError in some - # environments. See #1118. - distutils.errors.DistutilsPlatformError, -) - -try: - from distutils.msvc9compiler import Reg -except _msvc9_suppress_errors: - pass - - -def msvc9_find_vcvarsall(version): - """ - Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone - compiler build for Python - (VCForPython / Microsoft Visual C++ Compiler for Python 2.7). - - Fall back to original behavior when the standalone compiler is not - available. - - Redirect the path of "vcvarsall.bat". - - Parameters - ---------- - version: float - Required Microsoft Visual C++ version. 
- - Return - ------ - str - vcvarsall.bat path - """ - vc_base = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f' - key = vc_base % ('', version) - try: - # Per-user installs register the compiler path here - productdir = Reg.get_value(key, "installdir") - except KeyError: - try: - # All-user installs on a 64-bit system register here - key = vc_base % ('Wow6432Node\\', version) - productdir = Reg.get_value(key, "installdir") - except KeyError: - productdir = None - - if productdir: - vcvarsall = join(productdir, "vcvarsall.bat") - if isfile(vcvarsall): - return vcvarsall - - return get_unpatched(msvc9_find_vcvarsall)(version) - - -def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs): - """ - Patched "distutils.msvc9compiler.query_vcvarsall" for support extra - Microsoft Visual C++ 9.0 and 10.0 compilers. - - Set environment without use of "vcvarsall.bat". - - Parameters - ---------- - ver: float - Required Microsoft Visual C++ version. - arch: str - Target architecture. - - Return - ------ - dict - environment - """ - # Try to get environment from vcvarsall.bat (Classical way) - try: - orig = get_unpatched(msvc9_query_vcvarsall) - return orig(ver, arch, *args, **kwargs) - except distutils.errors.DistutilsPlatformError: - # Pass error if Vcvarsall.bat is missing - pass - except ValueError: - # Pass error if environment not set after executing vcvarsall.bat - pass - - # If error, try to set environment directly - try: - return EnvironmentInfo(arch, ver).return_env() - except distutils.errors.DistutilsPlatformError as exc: - _augment_exception(exc, ver, arch) - raise - - -def _msvc14_find_vc2015(): - """Python 3.8 "distutils/_msvccompiler.py" backport""" - try: - key = winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, - r"Software\Microsoft\VisualStudio\SxS\VC7", - 0, - winreg.KEY_READ | winreg.KEY_WOW64_32KEY - ) - except OSError: - return None, None - - best_version = 0 - best_dir = None - with key: - for i in itertools.count(): - try: - v, vc_dir, vt = winreg.EnumValue(key, i) - except OSError: - break - if v and vt == winreg.REG_SZ and isdir(vc_dir): - try: - version = int(float(v)) - except (ValueError, TypeError): - continue - if version >= 14 and version > best_version: - best_version, best_dir = version, vc_dir - return best_version, best_dir - - -def _msvc14_find_vc2017(): - """Python 3.8 "distutils/_msvccompiler.py" backport - - Returns "15, path" based on the result of invoking vswhere.exe - If no install is found, returns "None, None" - - The version is returned to avoid unnecessarily changing the function - result. It may be ignored when the path is not None. - - If vswhere.exe is not available, by definition, VS 2017 is not - installed. 
- """ - root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles") - if not root: - return None, None - - try: - path = subprocess.check_output([ - join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), - "-latest", - "-prerelease", - "-requiresAny", - "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", - "-requires", "Microsoft.VisualStudio.Workload.WDExpress", - "-property", "installationPath", - "-products", "*", - ]).decode(encoding="mbcs", errors="strict").strip() - except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): - return None, None - - path = join(path, "VC", "Auxiliary", "Build") - if isdir(path): - return 15, path - - return None, None - - -PLAT_SPEC_TO_RUNTIME = { - 'x86': 'x86', - 'x86_amd64': 'x64', - 'x86_arm': 'arm', - 'x86_arm64': 'arm64' -} - - -def _msvc14_find_vcvarsall(plat_spec): - """Python 3.8 "distutils/_msvccompiler.py" backport""" - _, best_dir = _msvc14_find_vc2017() - vcruntime = None - - if plat_spec in PLAT_SPEC_TO_RUNTIME: - vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec] - else: - vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86' - - if best_dir: - vcredist = join(best_dir, "..", "..", "redist", "MSVC", "**", - vcruntime_plat, "Microsoft.VC14*.CRT", - "vcruntime140.dll") - try: - import glob - vcruntime = glob.glob(vcredist, recursive=True)[-1] - except (ImportError, OSError, LookupError): - vcruntime = None - - if not best_dir: - best_version, best_dir = _msvc14_find_vc2015() - if best_version: - vcruntime = join(best_dir, 'redist', vcruntime_plat, - "Microsoft.VC140.CRT", "vcruntime140.dll") - - if not best_dir: - return None, None - - vcvarsall = join(best_dir, "vcvarsall.bat") - if not isfile(vcvarsall): - return None, None - - if not vcruntime or not isfile(vcruntime): - vcruntime = None - - return vcvarsall, vcruntime - - -def _msvc14_get_vc_env(plat_spec): - """Python 3.8 "distutils/_msvccompiler.py" backport""" - if "DISTUTILS_USE_SDK" in environ: - return { - key.lower(): value - for key, value in environ.items() - } - - vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec) - if not vcvarsall: - raise distutils.errors.DistutilsPlatformError( - "Unable to find vcvarsall.bat" - ) - - try: - out = subprocess.check_output( - 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec), - stderr=subprocess.STDOUT, - ).decode('utf-16le', errors='replace') - except subprocess.CalledProcessError as exc: - raise distutils.errors.DistutilsPlatformError( - "Error executing {}".format(exc.cmd) - ) from exc - - env = { - key.lower(): value - for key, _, value in - (line.partition('=') for line in out.splitlines()) - if key and value - } - - if vcruntime: - env['py_vcruntime_redist'] = vcruntime - return env - - -def msvc14_get_vc_env(plat_spec): - """ - Patched "distutils._msvccompiler._get_vc_env" for support extra - Microsoft Visual C++ 14.X compilers. - - Set environment without use of "vcvarsall.bat". - - Parameters - ---------- - plat_spec: str - Target architecture. 
- - Return - ------ - dict - environment - """ - - # Always use backport from CPython 3.8 - try: - return _msvc14_get_vc_env(plat_spec) - except distutils.errors.DistutilsPlatformError as exc: - _augment_exception(exc, 14.0) - raise - - -def msvc14_gen_lib_options(*args, **kwargs): - """ - Patched "distutils._msvccompiler.gen_lib_options" for fix - compatibility between "numpy.distutils" and "distutils._msvccompiler" - (for Numpy < 1.11.2) - """ - if "numpy.distutils" in sys.modules: - import numpy as np - if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'): - return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) - return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs) - - -def _augment_exception(exc, version, arch=''): - """ - Add details to the exception message to help guide the user - as to what action will resolve it. - """ - # Error if MSVC++ directory not found or environment not set - message = exc.args[0] - - if "vcvarsall" in message.lower() or "visual c" in message.lower(): - # Special error message if MSVC++ not installed - tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.' - message = tmpl.format(**locals()) - msdownload = 'www.microsoft.com/download/details.aspx?id=%d' - if version == 9.0: - if arch.lower().find('ia64') > -1: - # For VC++ 9.0, if IA64 support is needed, redirect user - # to Windows SDK 7.0. - # Note: No download link available from Microsoft. - message += ' Get it with "Microsoft Windows SDK 7.0"' - else: - # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : - # This redirection link is maintained by Microsoft. - # Contact vspython@microsoft.com if it needs updating. - message += ' Get it from http://aka.ms/vcpython27' - elif version == 10.0: - # For VC++ 10.0 Redirect user to Windows SDK 7.1 - message += ' Get it with "Microsoft Windows SDK 7.1": ' - message += msdownload % 8279 - elif version >= 14.0: - # For VC++ 14.X Redirect user to latest Visual C++ Build Tools - message += (' Get it with "Microsoft C++ Build Tools": ' - r'https://visualstudio.microsoft.com' - r'/visual-cpp-build-tools/') - - exc.args = (message, ) - - -class PlatformInfo: - """ - Current and Target Architectures information. - - Parameters - ---------- - arch: str - Target architecture. - """ - current_cpu = environ.get('processor_architecture', '').lower() - - def __init__(self, arch): - self.arch = arch.lower().replace('x64', 'amd64') - - @property - def target_cpu(self): - """ - Return Target CPU architecture. - - Return - ------ - str - Target CPU - """ - return self.arch[self.arch.find('_') + 1:] - - def target_is_x86(self): - """ - Return True if target CPU is x86 32 bits.. - - Return - ------ - bool - CPU is x86 32 bits - """ - return self.target_cpu == 'x86' - - def current_is_x86(self): - """ - Return True if current CPU is x86 32 bits.. - - Return - ------ - bool - CPU is x86 32 bits - """ - return self.current_cpu == 'x86' - - def current_dir(self, hidex86=False, x64=False): - """ - Current platform specific subfolder. - - Parameters - ---------- - hidex86: bool - return '' and not '\x86' if architecture is x86. - x64: bool - return '\x64' and not '\amd64' if architecture is amd64. - - Return - ------ - str - subfolder: '\target', or '' (see hidex86 parameter) - """ - return ( - '' if (self.current_cpu == 'x86' and hidex86) else - r'\x64' if (self.current_cpu == 'amd64' and x64) else - r'\%s' % self.current_cpu - ) - - def target_dir(self, hidex86=False, x64=False): - r""" - Target platform specific subfolder. 
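For context on how the patched msvc14_get_vc_env above is consumed: it returns the same variables that running vcvarsall.bat and dumping the environment would yield, with lower-cased keys. A sketch of querying it directly, assuming this (now removed) vendored setuptools.msvc module on a Windows host with the VC++ build tools installed; the 'x86_amd64' spec mirrors the vcvarsall.bat arguments:

from setuptools import msvc

# Build for 64-bit targets from any host, as distutils does for win-amd64.
env = msvc.msvc14_get_vc_env('x86_amd64')

print(env['path'].split(';')[0])  # typically the directory containing cl.exe
print(env['include'])             # header search path passed to the compiler
print(env['lib'])                 # library search path passed to the linker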
- - Parameters - ---------- - hidex86: bool - return '' and not '\x86' if architecture is x86. - x64: bool - return '\x64' and not '\amd64' if architecture is amd64. - - Return - ------ - str - subfolder: '\current', or '' (see hidex86 parameter) - """ - return ( - '' if (self.target_cpu == 'x86' and hidex86) else - r'\x64' if (self.target_cpu == 'amd64' and x64) else - r'\%s' % self.target_cpu - ) - - def cross_dir(self, forcex86=False): - r""" - Cross platform specific subfolder. - - Parameters - ---------- - forcex86: bool - Use 'x86' as current architecture even if current architecture is - not x86. - - Return - ------ - str - subfolder: '' if target architecture is current architecture, - '\current_target' if not. - """ - current = 'x86' if forcex86 else self.current_cpu - return ( - '' if self.target_cpu == current else - self.target_dir().replace('\\', '\\%s_' % current) - ) - - -class RegistryInfo: - """ - Microsoft Visual Studio related registry information. - - Parameters - ---------- - platform_info: PlatformInfo - "PlatformInfo" instance. - """ - HKEYS = (winreg.HKEY_USERS, - winreg.HKEY_CURRENT_USER, - winreg.HKEY_LOCAL_MACHINE, - winreg.HKEY_CLASSES_ROOT) - - def __init__(self, platform_info): - self.pi = platform_info - - @property - def visualstudio(self): - """ - Microsoft Visual Studio root registry key. - - Return - ------ - str - Registry key - """ - return 'VisualStudio' - - @property - def sxs(self): - """ - Microsoft Visual Studio SxS registry key. - - Return - ------ - str - Registry key - """ - return join(self.visualstudio, 'SxS') - - @property - def vc(self): - """ - Microsoft Visual C++ VC7 registry key. - - Return - ------ - str - Registry key - """ - return join(self.sxs, 'VC7') - - @property - def vs(self): - """ - Microsoft Visual Studio VS7 registry key. - - Return - ------ - str - Registry key - """ - return join(self.sxs, 'VS7') - - @property - def vc_for_python(self): - """ - Microsoft Visual C++ for Python registry key. - - Return - ------ - str - Registry key - """ - return r'DevDiv\VCForPython' - - @property - def microsoft_sdk(self): - """ - Microsoft SDK registry key. - - Return - ------ - str - Registry key - """ - return 'Microsoft SDKs' - - @property - def windows_sdk(self): - """ - Microsoft Windows/Platform SDK registry key. - - Return - ------ - str - Registry key - """ - return join(self.microsoft_sdk, 'Windows') - - @property - def netfx_sdk(self): - """ - Microsoft .NET Framework SDK registry key. - - Return - ------ - str - Registry key - """ - return join(self.microsoft_sdk, 'NETFXSDK') - - @property - def windows_kits_roots(self): - """ - Microsoft Windows Kits Roots registry key. - - Return - ------ - str - Registry key - """ - return r'Windows Kits\Installed Roots' - - def microsoft(self, key, x86=False): - """ - Return key in Microsoft software registry. - - Parameters - ---------- - key: str - Registry key path where look. - x86: str - Force x86 software registry. - - Return - ------ - str - Registry key - """ - node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node' - return join('Software', node64, 'Microsoft', key) - - def lookup(self, key, name): - """ - Look for values in registry in Microsoft software registry. - - Parameters - ---------- - key: str - Registry key path where look. - name: str - Value name to find. 
- - Return - ------ - str - value - """ - key_read = winreg.KEY_READ - openkey = winreg.OpenKey - closekey = winreg.CloseKey - ms = self.microsoft - for hkey in self.HKEYS: - bkey = None - try: - bkey = openkey(hkey, ms(key), 0, key_read) - except (OSError, IOError): - if not self.pi.current_is_x86(): - try: - bkey = openkey(hkey, ms(key, True), 0, key_read) - except (OSError, IOError): - continue - else: - continue - try: - return winreg.QueryValueEx(bkey, name)[0] - except (OSError, IOError): - pass - finally: - if bkey: - closekey(bkey) - - -class SystemInfo: - """ - Microsoft Windows and Visual Studio related system information. - - Parameters - ---------- - registry_info: RegistryInfo - "RegistryInfo" instance. - vc_ver: float - Required Microsoft Visual C++ version. - """ - - # Variables and properties in this class use originals CamelCase variables - # names from Microsoft source files for more easy comparison. - WinDir = environ.get('WinDir', '') - ProgramFiles = environ.get('ProgramFiles', '') - ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles) - - def __init__(self, registry_info, vc_ver=None): - self.ri = registry_info - self.pi = self.ri.pi - - self.known_vs_paths = self.find_programdata_vs_vers() - - # Except for VS15+, VC version is aligned with VS version - self.vs_ver = self.vc_ver = ( - vc_ver or self._find_latest_available_vs_ver()) - - def _find_latest_available_vs_ver(self): - """ - Find the latest VC version - - Return - ------ - float - version - """ - reg_vc_vers = self.find_reg_vs_vers() - - if not (reg_vc_vers or self.known_vs_paths): - raise distutils.errors.DistutilsPlatformError( - 'No Microsoft Visual C++ version found') - - vc_vers = set(reg_vc_vers) - vc_vers.update(self.known_vs_paths) - return sorted(vc_vers)[-1] - - def find_reg_vs_vers(self): - """ - Find Microsoft Visual Studio versions available in registry. - - Return - ------ - list of float - Versions - """ - ms = self.ri.microsoft - vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs) - vs_vers = [] - for hkey, key in itertools.product(self.ri.HKEYS, vckeys): - try: - bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ) - except (OSError, IOError): - continue - with bkey: - subkeys, values, _ = winreg.QueryInfoKey(bkey) - for i in range(values): - with contextlib.suppress(ValueError): - ver = float(winreg.EnumValue(bkey, i)[0]) - if ver not in vs_vers: - vs_vers.append(ver) - for i in range(subkeys): - with contextlib.suppress(ValueError): - ver = float(winreg.EnumKey(bkey, i)) - if ver not in vs_vers: - vs_vers.append(ver) - return sorted(vs_vers) - - def find_programdata_vs_vers(self): - r""" - Find Visual studio 2017+ versions from information in - "C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances". - - Return - ------ - dict - float version as key, path as value. 
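The lookup() method just defined is the single point through which the rest of this module reads the Windows registry, trying every hive in HKEYS and falling back to the Wow6432Node view on 64-bit systems. A sketch of exercising it the same way the SystemInfo properties below do (Windows only; the '14.0' value name is purely illustrative):

from setuptools.msvc import PlatformInfo, RegistryInfo

ri = RegistryInfo(PlatformInfo('x86'))

# Same pattern as SystemInfo.VSInstallDir: read the VS7 key for a given version.
vs_dir = ri.lookup(ri.vs, '14.0')
# Same pattern as SystemInfo.WindowsSdkDir: read a Windows SDK install folder.
sdk_dir = ri.lookup(r'Microsoft SDKs\Windows\v10.0', 'installationfolder')
print(vs_dir, sdk_dir)  # either value is None when the key is absent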
- """ - vs_versions = {} - instances_dir = \ - r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances' - - try: - hashed_names = listdir(instances_dir) - - except (OSError, IOError): - # Directory not exists with all Visual Studio versions - return vs_versions - - for name in hashed_names: - try: - # Get VS installation path from "state.json" file - state_path = join(instances_dir, name, 'state.json') - with open(state_path, 'rt', encoding='utf-8') as state_file: - state = json.load(state_file) - vs_path = state['installationPath'] - - # Raises OSError if this VS installation does not contain VC - listdir(join(vs_path, r'VC\Tools\MSVC')) - - # Store version and path - vs_versions[self._as_float_version( - state['installationVersion'])] = vs_path - - except (OSError, IOError, KeyError): - # Skip if "state.json" file is missing or bad format - continue - - return vs_versions - - @staticmethod - def _as_float_version(version): - """ - Return a string version as a simplified float version (major.minor) - - Parameters - ---------- - version: str - Version. - - Return - ------ - float - version - """ - return float('.'.join(version.split('.')[:2])) - - @property - def VSInstallDir(self): - """ - Microsoft Visual Studio directory. - - Return - ------ - str - path - """ - # Default path - default = join(self.ProgramFilesx86, - 'Microsoft Visual Studio %0.1f' % self.vs_ver) - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default - - @property - def VCInstallDir(self): - """ - Microsoft Visual C++ directory. - - Return - ------ - str - path - """ - path = self._guess_vc() or self._guess_vc_legacy() - - if not isdir(path): - msg = 'Microsoft Visual C++ directory not found' - raise distutils.errors.DistutilsPlatformError(msg) - - return path - - def _guess_vc(self): - """ - Locate Visual C++ for VS2017+. - - Return - ------ - str - path - """ - if self.vs_ver <= 14.0: - return '' - - try: - # First search in known VS paths - vs_dir = self.known_vs_paths[self.vs_ver] - except KeyError: - # Else, search with path from registry - vs_dir = self.VSInstallDir - - guess_vc = join(vs_dir, r'VC\Tools\MSVC') - - # Subdir with VC exact version as name - try: - # Update the VC version with real one instead of VS version - vc_ver = listdir(guess_vc)[-1] - self.vc_ver = self._as_float_version(vc_ver) - return join(guess_vc, vc_ver) - except (OSError, IOError, IndexError): - return '' - - def _guess_vc_legacy(self): - """ - Locate Visual C++ for versions prior to 2017. - - Return - ------ - str - path - """ - default = join(self.ProgramFilesx86, - r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver) - - # Try to get "VC++ for Python" path from registry as default path - reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver) - python_vc = self.ri.lookup(reg_path, 'installdir') - default_vc = join(python_vc, 'VC') if python_vc else default - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc - - @property - def WindowsSdkVersion(self): - """ - Microsoft Windows SDK versions for specified MSVC++ version. 
- - Return - ------ - tuple of str - versions - """ - if self.vs_ver <= 9.0: - return '7.0', '6.1', '6.0a' - elif self.vs_ver == 10.0: - return '7.1', '7.0a' - elif self.vs_ver == 11.0: - return '8.0', '8.0a' - elif self.vs_ver == 12.0: - return '8.1', '8.1a' - elif self.vs_ver >= 14.0: - return '10.0', '8.1' - - @property - def WindowsSdkLastVersion(self): - """ - Microsoft Windows SDK last version. - - Return - ------ - str - version - """ - return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib')) - - @property # noqa: C901 - def WindowsSdkDir(self): # noqa: C901 # is too complex (12) # FIXME - """ - Microsoft Windows SDK directory. - - Return - ------ - str - path - """ - sdkdir = '' - for ver in self.WindowsSdkVersion: - # Try to get it from registry - loc = join(self.ri.windows_sdk, 'v%s' % ver) - sdkdir = self.ri.lookup(loc, 'installationfolder') - if sdkdir: - break - if not sdkdir or not isdir(sdkdir): - # Try to get "VC++ for Python" version from registry - path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) - install_base = self.ri.lookup(path, 'installdir') - if install_base: - sdkdir = join(install_base, 'WinSDK') - if not sdkdir or not isdir(sdkdir): - # If fail, use default new path - for ver in self.WindowsSdkVersion: - intver = ver[:ver.rfind('.')] - path = r'Microsoft SDKs\Windows Kits\%s' % intver - d = join(self.ProgramFiles, path) - if isdir(d): - sdkdir = d - if not sdkdir or not isdir(sdkdir): - # If fail, use default old path - for ver in self.WindowsSdkVersion: - path = r'Microsoft SDKs\Windows\v%s' % ver - d = join(self.ProgramFiles, path) - if isdir(d): - sdkdir = d - if not sdkdir: - # If fail, use Platform SDK - sdkdir = join(self.VCInstallDir, 'PlatformSDK') - return sdkdir - - @property - def WindowsSDKExecutablePath(self): - """ - Microsoft Windows SDK executable directory. - - Return - ------ - str - path - """ - # Find WinSDK NetFx Tools registry dir name - if self.vs_ver <= 11.0: - netfxver = 35 - arch = '' - else: - netfxver = 40 - hidex86 = True if self.vs_ver <= 12.0 else False - arch = self.pi.current_dir(x64=True, hidex86=hidex86) - fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-')) - - # list all possibles registry paths - regpaths = [] - if self.vs_ver >= 14.0: - for ver in self.NetFxSdkVersion: - regpaths += [join(self.ri.netfx_sdk, ver, fx)] - - for ver in self.WindowsSdkVersion: - regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)] - - # Return installation folder from the more recent path - for path in regpaths: - execpath = self.ri.lookup(path, 'installationfolder') - if execpath: - return execpath - - @property - def FSharpInstallDir(self): - """ - Microsoft Visual F# directory. - - Return - ------ - str - path - """ - path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver) - return self.ri.lookup(path, 'productdir') or '' - - @property - def UniversalCRTSdkDir(self): - """ - Microsoft Universal CRT SDK directory. - - Return - ------ - str - path - """ - # Set Kit Roots versions for specified MSVC++ version - vers = ('10', '81') if self.vs_ver >= 14.0 else () - - # Find path of the more recent Kit - for ver in vers: - sdkdir = self.ri.lookup(self.ri.windows_kits_roots, - 'kitsroot%s' % ver) - if sdkdir: - return sdkdir or '' - - @property - def UniversalCRTSdkLastVersion(self): - """ - Microsoft Universal C Runtime SDK last version. 
- - Return - ------ - str - version - """ - return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib')) - - @property - def NetFxSdkVersion(self): - """ - Microsoft .NET Framework SDK versions. - - Return - ------ - tuple of str - versions - """ - # Set FxSdk versions for specified VS version - return (('4.7.2', '4.7.1', '4.7', - '4.6.2', '4.6.1', '4.6', - '4.5.2', '4.5.1', '4.5') - if self.vs_ver >= 14.0 else ()) - - @property - def NetFxSdkDir(self): - """ - Microsoft .NET Framework SDK directory. - - Return - ------ - str - path - """ - sdkdir = '' - for ver in self.NetFxSdkVersion: - loc = join(self.ri.netfx_sdk, ver) - sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder') - if sdkdir: - break - return sdkdir - - @property - def FrameworkDir32(self): - """ - Microsoft .NET Framework 32bit directory. - - Return - ------ - str - path - """ - # Default path - guess_fw = join(self.WinDir, r'Microsoft.NET\Framework') - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw - - @property - def FrameworkDir64(self): - """ - Microsoft .NET Framework 64bit directory. - - Return - ------ - str - path - """ - # Default path - guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64') - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw - - @property - def FrameworkVersion32(self): - """ - Microsoft .NET Framework 32bit versions. - - Return - ------ - tuple of str - versions - """ - return self._find_dot_net_versions(32) - - @property - def FrameworkVersion64(self): - """ - Microsoft .NET Framework 64bit versions. - - Return - ------ - tuple of str - versions - """ - return self._find_dot_net_versions(64) - - def _find_dot_net_versions(self, bits): - """ - Find Microsoft .NET Framework versions. - - Parameters - ---------- - bits: int - Platform number of bits: 32 or 64. - - Return - ------ - tuple of str - versions - """ - # Find actual .NET version in registry - reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) - dot_net_dir = getattr(self, 'FrameworkDir%d' % bits) - ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or '' - - # Set .NET versions for specified MSVC++ version - if self.vs_ver >= 12.0: - return ver, 'v4.0' - elif self.vs_ver >= 10.0: - return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5' - elif self.vs_ver == 9.0: - return 'v3.5', 'v2.0.50727' - elif self.vs_ver == 8.0: - return 'v3.0', 'v2.0.50727' - - @staticmethod - def _use_last_dir_name(path, prefix=''): - """ - Return name of the last dir in path or '' if no dir found. - - Parameters - ---------- - path: str - Use dirs in this path - prefix: str - Use only dirs starting by this prefix - - Return - ------ - str - name - """ - matching_dirs = ( - dir_name - for dir_name in reversed(listdir(path)) - if isdir(join(path, dir_name)) and - dir_name.startswith(prefix) - ) - return next(matching_dirs, None) or '' - - -class EnvironmentInfo: - """ - Return environment variables for specified Microsoft Visual C++ version - and platform : Lib, Include, Path and libpath. - - This function is compatible with Microsoft Visual C++ 9.0 to 14.X. - - Script created by analysing Microsoft environment configuration files like - "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ... - - Parameters - ---------- - arch: str - Target architecture. - vc_ver: float - Required Microsoft Visual C++ version. If not set, autodetect the last - version. 
- vc_min_ver: float - Minimum Microsoft Visual C++ version. - """ - - # Variables and properties in this class use originals CamelCase variables - # names from Microsoft source files for more easy comparison. - - def __init__(self, arch, vc_ver=None, vc_min_ver=0): - self.pi = PlatformInfo(arch) - self.ri = RegistryInfo(self.pi) - self.si = SystemInfo(self.ri, vc_ver) - - if self.vc_ver < vc_min_ver: - err = 'No suitable Microsoft Visual C++ version found' - raise distutils.errors.DistutilsPlatformError(err) - - @property - def vs_ver(self): - """ - Microsoft Visual Studio. - - Return - ------ - float - version - """ - return self.si.vs_ver - - @property - def vc_ver(self): - """ - Microsoft Visual C++ version. - - Return - ------ - float - version - """ - return self.si.vc_ver - - @property - def VSTools(self): - """ - Microsoft Visual Studio Tools. - - Return - ------ - list of str - paths - """ - paths = [r'Common7\IDE', r'Common7\Tools'] - - if self.vs_ver >= 14.0: - arch_subdir = self.pi.current_dir(hidex86=True, x64=True) - paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow'] - paths += [r'Team Tools\Performance Tools'] - paths += [r'Team Tools\Performance Tools%s' % arch_subdir] - - return [join(self.si.VSInstallDir, path) for path in paths] - - @property - def VCIncludes(self): - """ - Microsoft Visual C++ & Microsoft Foundation Class Includes. - - Return - ------ - list of str - paths - """ - return [join(self.si.VCInstallDir, 'Include'), - join(self.si.VCInstallDir, r'ATLMFC\Include')] - - @property - def VCLibraries(self): - """ - Microsoft Visual C++ & Microsoft Foundation Class Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver >= 15.0: - arch_subdir = self.pi.target_dir(x64=True) - else: - arch_subdir = self.pi.target_dir(hidex86=True) - paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir] - - if self.vs_ver >= 14.0: - paths += [r'Lib\store%s' % arch_subdir] - - return [join(self.si.VCInstallDir, path) for path in paths] - - @property - def VCStoreRefs(self): - """ - Microsoft Visual C++ store references Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0: - return [] - return [join(self.si.VCInstallDir, r'Lib\store\references')] - - @property - def VCTools(self): - """ - Microsoft Visual C++ Tools. - - Return - ------ - list of str - paths - """ - si = self.si - tools = [join(si.VCInstallDir, 'VCPackages')] - - forcex86 = True if self.vs_ver <= 10.0 else False - arch_subdir = self.pi.cross_dir(forcex86) - if arch_subdir: - tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)] - - if self.vs_ver == 14.0: - path = 'Bin%s' % self.pi.current_dir(hidex86=True) - tools += [join(si.VCInstallDir, path)] - - elif self.vs_ver >= 15.0: - host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else - r'bin\HostX64%s') - tools += [join( - si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))] - - if self.pi.current_cpu != self.pi.target_cpu: - tools += [join( - si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))] - - else: - tools += [join(si.VCInstallDir, 'Bin')] - - return tools - - @property - def OSLibraries(self): - """ - Microsoft Windows SDK Libraries. 
- - Return - ------ - list of str - paths - """ - if self.vs_ver <= 10.0: - arch_subdir = self.pi.target_dir(hidex86=True, x64=True) - return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)] - - else: - arch_subdir = self.pi.target_dir(x64=True) - lib = join(self.si.WindowsSdkDir, 'lib') - libver = self._sdk_subdir - return [join(lib, '%sum%s' % (libver, arch_subdir))] - - @property - def OSIncludes(self): - """ - Microsoft Windows SDK Include. - - Return - ------ - list of str - paths - """ - include = join(self.si.WindowsSdkDir, 'include') - - if self.vs_ver <= 10.0: - return [include, join(include, 'gl')] - - else: - if self.vs_ver >= 14.0: - sdkver = self._sdk_subdir - else: - sdkver = '' - return [join(include, '%sshared' % sdkver), - join(include, '%sum' % sdkver), - join(include, '%swinrt' % sdkver)] - - @property - def OSLibpath(self): - """ - Microsoft Windows SDK Libraries Paths. - - Return - ------ - list of str - paths - """ - ref = join(self.si.WindowsSdkDir, 'References') - libpath = [] - - if self.vs_ver <= 9.0: - libpath += self.OSLibraries - - if self.vs_ver >= 11.0: - libpath += [join(ref, r'CommonConfiguration\Neutral')] - - if self.vs_ver >= 14.0: - libpath += [ - ref, - join(self.si.WindowsSdkDir, 'UnionMetadata'), - join( - ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'), - join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'), - join( - ref, 'Windows.Networking.Connectivity.WwanContract', - '1.0.0.0'), - join( - self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs', - '%0.1f' % self.vs_ver, 'References', 'CommonConfiguration', - 'neutral'), - ] - return libpath - - @property - def SdkTools(self): - """ - Microsoft Windows SDK Tools. - - Return - ------ - list of str - paths - """ - return list(self._sdk_tools()) - - def _sdk_tools(self): - """ - Microsoft Windows SDK Tools paths generator. - - Return - ------ - generator of str - paths - """ - if self.vs_ver < 15.0: - bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86' - yield join(self.si.WindowsSdkDir, bin_dir) - - if not self.pi.current_is_x86(): - arch_subdir = self.pi.current_dir(x64=True) - path = 'Bin%s' % arch_subdir - yield join(self.si.WindowsSdkDir, path) - - if self.vs_ver in (10.0, 11.0): - if self.pi.target_is_x86(): - arch_subdir = '' - else: - arch_subdir = self.pi.current_dir(hidex86=True, x64=True) - path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir - yield join(self.si.WindowsSdkDir, path) - - elif self.vs_ver >= 15.0: - path = join(self.si.WindowsSdkDir, 'Bin') - arch_subdir = self.pi.current_dir(x64=True) - sdkver = self.si.WindowsSdkLastVersion - yield join(path, '%s%s' % (sdkver, arch_subdir)) - - if self.si.WindowsSDKExecutablePath: - yield self.si.WindowsSDKExecutablePath - - @property - def _sdk_subdir(self): - """ - Microsoft Windows SDK version subdir. - - Return - ------ - str - subdir - """ - ucrtver = self.si.WindowsSdkLastVersion - return ('%s\\' % ucrtver) if ucrtver else '' - - @property - def SdkSetup(self): - """ - Microsoft Windows SDK Setup. - - Return - ------ - list of str - paths - """ - if self.vs_ver > 9.0: - return [] - - return [join(self.si.WindowsSdkDir, 'Setup')] - - @property - def FxTools(self): - """ - Microsoft .NET Framework Tools. 
- - Return - ------ - list of str - paths - """ - pi = self.pi - si = self.si - - if self.vs_ver <= 10.0: - include32 = True - include64 = not pi.target_is_x86() and not pi.current_is_x86() - else: - include32 = pi.target_is_x86() or pi.current_is_x86() - include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64' - - tools = [] - if include32: - tools += [join(si.FrameworkDir32, ver) - for ver in si.FrameworkVersion32] - if include64: - tools += [join(si.FrameworkDir64, ver) - for ver in si.FrameworkVersion64] - return tools - - @property - def NetFxSDKLibraries(self): - """ - Microsoft .Net Framework SDK Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0 or not self.si.NetFxSdkDir: - return [] - - arch_subdir = self.pi.target_dir(x64=True) - return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)] - - @property - def NetFxSDKIncludes(self): - """ - Microsoft .Net Framework SDK Includes. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0 or not self.si.NetFxSdkDir: - return [] - - return [join(self.si.NetFxSdkDir, r'include\um')] - - @property - def VsTDb(self): - """ - Microsoft Visual Studio Team System Database. - - Return - ------ - list of str - paths - """ - return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')] - - @property - def MSBuild(self): - """ - Microsoft Build Engine. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 12.0: - return [] - elif self.vs_ver < 15.0: - base_path = self.si.ProgramFilesx86 - arch_subdir = self.pi.current_dir(hidex86=True) - else: - base_path = self.si.VSInstallDir - arch_subdir = '' - - path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir) - build = [join(base_path, path)] - - if self.vs_ver >= 15.0: - # Add Roslyn C# & Visual Basic Compiler - build += [join(base_path, path, 'Roslyn')] - - return build - - @property - def HTMLHelpWorkshop(self): - """ - Microsoft HTML Help Workshop. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 11.0: - return [] - - return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')] - - @property - def UCRTLibraries(self): - """ - Microsoft Universal C Runtime SDK Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0: - return [] - - arch_subdir = self.pi.target_dir(x64=True) - lib = join(self.si.UniversalCRTSdkDir, 'lib') - ucrtver = self._ucrt_subdir - return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))] - - @property - def UCRTIncludes(self): - """ - Microsoft Universal C Runtime SDK Include. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0: - return [] - - include = join(self.si.UniversalCRTSdkDir, 'include') - return [join(include, '%sucrt' % self._ucrt_subdir)] - - @property - def _ucrt_subdir(self): - """ - Microsoft Universal C Runtime SDK version subdir. - - Return - ------ - str - subdir - """ - ucrtver = self.si.UniversalCRTSdkLastVersion - return ('%s\\' % ucrtver) if ucrtver else '' - - @property - def FSharp(self): - """ - Microsoft Visual F#. - - Return - ------ - list of str - paths - """ - if 11.0 > self.vs_ver > 12.0: - return [] - - return [self.si.FSharpInstallDir] - - @property - def VCRuntimeRedist(self): - """ - Microsoft Visual C++ runtime redistributable dll. 
- - Return - ------ - str - path - """ - vcruntime = 'vcruntime%d0.dll' % self.vc_ver - arch_subdir = self.pi.target_dir(x64=True).strip('\\') - - # Installation prefixes candidates - prefixes = [] - tools_path = self.si.VCInstallDir - redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist')) - if isdir(redist_path): - # Redist version may not be exactly the same as tools - redist_path = join(redist_path, listdir(redist_path)[-1]) - prefixes += [redist_path, join(redist_path, 'onecore')] - - prefixes += [join(tools_path, 'redist')] # VS14 legacy path - - # CRT directory - crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10), - # Sometime store in directory with VS version instead of VC - 'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10)) - - # vcruntime path - for prefix, crt_dir in itertools.product(prefixes, crt_dirs): - path = join(prefix, arch_subdir, crt_dir, vcruntime) - if isfile(path): - return path - - def return_env(self, exists=True): - """ - Return environment dict. - - Parameters - ---------- - exists: bool - It True, only return existing paths. - - Return - ------ - dict - environment - """ - env = dict( - include=self._build_paths('include', - [self.VCIncludes, - self.OSIncludes, - self.UCRTIncludes, - self.NetFxSDKIncludes], - exists), - lib=self._build_paths('lib', - [self.VCLibraries, - self.OSLibraries, - self.FxTools, - self.UCRTLibraries, - self.NetFxSDKLibraries], - exists), - libpath=self._build_paths('libpath', - [self.VCLibraries, - self.FxTools, - self.VCStoreRefs, - self.OSLibpath], - exists), - path=self._build_paths('path', - [self.VCTools, - self.VSTools, - self.VsTDb, - self.SdkTools, - self.SdkSetup, - self.FxTools, - self.MSBuild, - self.HTMLHelpWorkshop, - self.FSharp], - exists), - ) - if self.vs_ver >= 14 and isfile(self.VCRuntimeRedist): - env['py_vcruntime_redist'] = self.VCRuntimeRedist - return env - - def _build_paths(self, name, spec_path_lists, exists): - """ - Given an environment variable name and specified paths, - return a pathsep-separated string of paths containing - unique, extant, directories from those paths and from - the environment variable. Raise an error if no paths - are resolved. - - Parameters - ---------- - name: str - Environment variable name - spec_path_lists: list of str - Paths - exists: bool - It True, only return existing paths. 
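return_env() above is the end product of this module: for a given target architecture it assembles include, lib, libpath and path into one dict, raising DistutilsPlatformError when no usable toolchain is found. A usage sketch, again assuming the vendored setuptools.msvc on a Windows build machine:

import distutils.errors
from setuptools.msvc import EnvironmentInfo

try:
    # Target 64-bit builds and refuse anything older than MSVC 14.0 (VS 2015).
    env = EnvironmentInfo('x86_amd64', vc_min_ver=14.0).return_env()
except distutils.errors.DistutilsPlatformError as exc:
    print('No suitable compiler found:', exc)
else:
    print(sorted(env))                # ['include', 'lib', 'libpath', 'path', ...]
    print(env['path'].split(';')[:3])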
- - Return - ------ - str - Pathsep-separated paths - """ - # flatten spec_path_lists - spec_paths = itertools.chain.from_iterable(spec_path_lists) - env_paths = environ.get(name, '').split(pathsep) - paths = itertools.chain(spec_paths, env_paths) - extant_paths = list(filter(isdir, paths)) if exists else paths - if not extant_paths: - msg = "%s environment variable is empty" % name.upper() - raise distutils.errors.DistutilsPlatformError(msg) - unique_paths = unique_everseen(extant_paths) - return pathsep.join(unique_paths) diff --git a/spaces/tom-beer/birds-israel/README.md b/spaces/tom-beer/birds-israel/README.md deleted file mode 100644 index 31281e3c58d2783ef9729eaf674bac56f3d44f82..0000000000000000000000000000000000000000 --- a/spaces/tom-beer/birds-israel/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Birds Israel -emoji: 🐢 -colorFrom: yellow -colorTo: green -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false -license: cc-by-nc-nd-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tomofi/MMOCR/tests/test_utils/test_mask/test_mask_utils.py b/spaces/tomofi/MMOCR/tests/test_utils/test_mask/test_mask_utils.py deleted file mode 100644 index 12319bbbc734e9e74555ad48f0122dcf0b041372..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/tests/test_utils/test_mask/test_mask_utils.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -"""Test text mask_utils.""" -import tempfile -from unittest import mock - -import numpy as np -import pytest - -import mmocr.core.evaluation.utils as eval_utils -import mmocr.core.mask as mask_utils -import mmocr.core.visualize as visualize_utils - - -def test_points2boundary(): - - points = np.array([[1, 2]]) - text_repr_type = 'quad' - text_score = None - - # test invalid arguments - with pytest.raises(AssertionError): - mask_utils.points2boundary([], text_repr_type, text_score) - - with pytest.raises(AssertionError): - mask_utils.points2boundary(points, '', text_score) - with pytest.raises(AssertionError): - mask_utils.points2boundary(points, '', 1.1) - - # test quad - points = np.array([[0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1], [0, 2], - [1, 2], [2, 2]]) - text_repr_type = 'quad' - text_score = None - - result = mask_utils.points2boundary(points, text_repr_type, text_score) - pred_poly = eval_utils.points2polygon(result) - target_poly = eval_utils.points2polygon([2, 2, 0, 2, 0, 0, 2, 0]) - assert eval_utils.poly_iou(pred_poly, target_poly) == 1 - - # test poly - text_repr_type = 'poly' - result = mask_utils.points2boundary(points, text_repr_type, text_score) - pred_poly = eval_utils.points2polygon(result) - target_poly = eval_utils.points2polygon([0, 0, 0, 2, 2, 2, 2, 0]) - assert eval_utils.poly_iou(pred_poly, target_poly) == 1 - - -def test_seg2boundary(): - - seg = np.array([[]]) - text_repr_type = 'quad' - text_score = None - # test invalid arguments - with pytest.raises(AssertionError): - mask_utils.seg2boundary([[]], text_repr_type, text_score) - with pytest.raises(AssertionError): - mask_utils.seg2boundary(seg, 1, text_score) - with pytest.raises(AssertionError): - mask_utils.seg2boundary(seg, text_repr_type, 1.1) - - seg = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) - result = mask_utils.seg2boundary(seg, text_repr_type, text_score) - pred_poly = eval_utils.points2polygon(result) - target_poly = eval_utils.points2polygon([2, 2, 0, 2, 0, 0, 2, 0]) - assert eval_utils.poly_iou(pred_poly, target_poly) == 
1 - - -@mock.patch('%s.visualize_utils.plt' % __name__) -def test_show_feature(mock_plt): - - features = [np.random.rand(10, 10)] - names = ['test'] - to_uint8 = [0] - out_file = None - - # test invalid arguments - with pytest.raises(AssertionError): - visualize_utils.show_feature([], names, to_uint8, out_file) - with pytest.raises(AssertionError): - visualize_utils.show_feature(features, [1], to_uint8, out_file) - with pytest.raises(AssertionError): - visualize_utils.show_feature(features, names, ['a'], out_file) - with pytest.raises(AssertionError): - visualize_utils.show_feature(features, names, to_uint8, 1) - with pytest.raises(AssertionError): - visualize_utils.show_feature(features, names, to_uint8, [0, 1]) - - visualize_utils.show_feature(features, names, to_uint8) - - # test showing img - mock_plt.title.assert_called_once_with('test') - mock_plt.show.assert_called_once() - - # test saving fig - out_file = tempfile.NamedTemporaryFile().name - visualize_utils.show_feature(features, names, to_uint8, out_file) - mock_plt.savefig.assert_called_once() - - -@mock.patch('%s.visualize_utils.plt' % __name__) -def test_show_img_boundary(mock_plt): - img = np.random.rand(10, 10) - boundary = [0, 0, 1, 0, 1, 1, 0, 1] - # test invalid arguments - with pytest.raises(AssertionError): - visualize_utils.show_img_boundary([], boundary) - with pytest.raises(AssertionError): - visualize_utils.show_img_boundary(img, np.array([])) - - # test showing img - - visualize_utils.show_img_boundary(img, boundary) - mock_plt.imshow.assert_called_once() - mock_plt.show.assert_called_once() - - -@mock.patch('%s.visualize_utils.mmcv' % __name__) -def test_show_pred_gt(mock_mmcv): - preds = [[0, 0, 1, 0, 1, 1, 0, 1]] - gts = [[0, 0, 1, 0, 1, 1, 0, 1]] - show = True - win_name = 'test' - wait_time = 0 - out_file = tempfile.NamedTemporaryFile().name - - with pytest.raises(AssertionError): - visualize_utils.show_pred_gt(np.array([]), gts) - with pytest.raises(AssertionError): - visualize_utils.show_pred_gt(preds, np.array([])) - - # test showing img - - visualize_utils.show_pred_gt(preds, gts, show, win_name, wait_time, - out_file) - mock_mmcv.imshow.assert_called_once() - mock_mmcv.imwrite.assert_called_once() - - -@mock.patch('%s.visualize_utils.mmcv.imshow' % __name__) -@mock.patch('%s.visualize_utils.mmcv.imwrite' % __name__) -def test_imshow_pred_boundary(mock_imshow, mock_imwrite): - img = './tests/data/test_img1.jpg' - boundaries_with_scores = [[0, 0, 1, 0, 1, 1, 0, 1, 1]] - labels = [1] - file = tempfile.NamedTemporaryFile().name - visualize_utils.imshow_pred_boundary( - img, boundaries_with_scores, labels, show=True, out_file=file) - mock_imwrite.assert_called_once() - mock_imshow.assert_called_once() - - -@mock.patch('%s.visualize_utils.mmcv.imshow' % __name__) -@mock.patch('%s.visualize_utils.mmcv.imwrite' % __name__) -def test_imshow_text_char_boundary(mock_imshow, mock_imwrite): - - img = './tests/data/test_img1.jpg' - text_quads = [[0, 0, 1, 0, 1, 1, 0, 1]] - boundaries = [[0, 0, 1, 0, 1, 1, 0, 1]] - char_quads = [[[0, 0, 1, 0, 1, 1, 0, 1], [0, 0, 1, 0, 1, 1, 0, 1]]] - chars = [['a', 'b']] - show = True, - out_file = tempfile.NamedTemporaryFile().name - visualize_utils.imshow_text_char_boundary( - img, - text_quads, - boundaries, - char_quads, - chars, - show=show, - out_file=out_file) - mock_imwrite.assert_called_once() - mock_imshow.assert_called_once() - - -@mock.patch('%s.visualize_utils.cv2.drawContours' % __name__) -def test_overlay_mask_img(mock_drawContours): - - img = np.random.rand(10, 10) - mask 
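The tests above pin down the observable behaviour of points2boundary: given the foreground pixel coordinates of a text region it returns a quad or polygon boundary, with the confidence appended when text_score is supplied. A small sketch of that call, assuming MMOCR's mmocr.core.mask module exactly as imported by this test file (the region itself is arbitrary):

import numpy as np
import mmocr.core.mask as mask_utils

seg = np.zeros((10, 10), dtype=np.uint8)
seg[2:8, 3:9] = 1                         # a filled, roughly rectangular text region

# points2boundary expects (x, y) coordinates of the foreground pixels.
points = np.fliplr(np.argwhere(seg > 0))
boundary = mask_utils.points2boundary(points, 'quad', 0.9)
# Expected form: [x1, y1, x2, y2, x3, y3, x4, y4, 0.9] for the minimum-area quadrilateral.
print(boundary)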
= np.zeros((10, 10)) - visualize_utils.overlay_mask_img(img, mask) - mock_drawContours.assert_called_once() - - -def test_extract_boundary(): - result = {} - - # test invalid arguments - with pytest.raises(AssertionError): - mask_utils.extract_boundary(result) - - result = {'boundary_result': [0, 1]} - with pytest.raises(AssertionError): - mask_utils.extract_boundary(result) - - result = {'boundary_result': [[0, 0, 1, 0, 1, 1, 0, 1, 1]]} - - output = mask_utils.extract_boundary(result) - assert output[2] == [1] diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/Makefile b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/Makefile deleted file mode 100644 index d4bb2cbb9eddb1bb1b4f366623044af8e4830919..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/dense_test_mixins.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/dense_test_mixins.py deleted file mode 100644 index dd81364dec90e97c30a6e2220a5e0fe96373c5bd..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/dense_test_mixins.py +++ /dev/null @@ -1,100 +0,0 @@ -from inspect import signature - -import torch - -from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms - - -class BBoxTestMixin(object): - """Mixin class for test time augmentation of bboxes.""" - - def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): - """Merge augmented detection bboxes and scores. - - Args: - aug_bboxes (list[Tensor]): shape (n, 4*#class) - aug_scores (list[Tensor] or None): shape (n, #class) - img_shapes (list[Tensor]): shape (3, ). - - Returns: - tuple: (bboxes, scores) - """ - recovered_bboxes = [] - for bboxes, img_info in zip(aug_bboxes, img_metas): - img_shape = img_info[0]['img_shape'] - scale_factor = img_info[0]['scale_factor'] - flip = img_info[0]['flip'] - flip_direction = img_info[0]['flip_direction'] - bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, - flip_direction) - recovered_bboxes.append(bboxes) - bboxes = torch.cat(recovered_bboxes, dim=0) - if aug_scores is None: - return bboxes - else: - scores = torch.cat(aug_scores, dim=0) - return bboxes, scores - - def aug_test_bboxes(self, feats, img_metas, rescale=False): - """Test det bboxes with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. 
- Defaults to False. - - Returns: - list[ndarray]: bbox results of each class - """ - # check with_nms argument - gb_sig = signature(self.get_bboxes) - gb_args = [p.name for p in gb_sig.parameters.values()] - if hasattr(self, '_get_bboxes'): - gbs_sig = signature(self._get_bboxes) - else: - gbs_sig = signature(self._get_bboxes_single) - gbs_args = [p.name for p in gbs_sig.parameters.values()] - assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ - f'{self.__class__.__name__}' \ - ' does not support test-time augmentation' - - aug_bboxes = [] - aug_scores = [] - aug_factors = [] # score_factors for NMS - for x, img_meta in zip(feats, img_metas): - # only one image in the batch - outs = self.forward(x) - bbox_inputs = outs + (img_meta, self.test_cfg, False, False) - bbox_outputs = self.get_bboxes(*bbox_inputs)[0] - aug_bboxes.append(bbox_outputs[0]) - aug_scores.append(bbox_outputs[1]) - # bbox_outputs of some detectors (e.g., ATSS, FCOS, YOLOv3) - # contains additional element to adjust scores before NMS - if len(bbox_outputs) >= 3: - aug_factors.append(bbox_outputs[2]) - - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = self.merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas) - merged_factors = torch.cat(aug_factors, dim=0) if aug_factors else None - det_bboxes, det_labels = multiclass_nms( - merged_bboxes, - merged_scores, - self.test_cfg.score_thr, - self.test_cfg.nms, - self.test_cfg.max_per_img, - score_factors=merged_factors) - - if rescale: - _det_bboxes = det_bboxes - else: - _det_bboxes = det_bboxes.clone() - _det_bboxes[:, :4] *= det_bboxes.new_tensor( - img_metas[0][0]['scale_factor']) - bbox_results = bbox2result(_det_bboxes, det_labels, self.num_classes) - return bbox_results diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/yolof_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/yolof_head.py deleted file mode 100644 index e15d4d4a6f40150af803c196b8f5db3d68a61f6d..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/yolof_head.py +++ /dev/null @@ -1,415 +0,0 @@ -import torch -import torch.nn as nn -from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm, - normal_init) -from mmcv.runner import force_fp32 - -from mmdet.core import anchor_inside_flags, multi_apply, reduce_mean, unmap -from ..builder import HEADS -from .anchor_head import AnchorHead - -INF = 1e8 - - -def levels_to_images(mlvl_tensor): - """Concat multi-level feature maps by image. - - [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] - Convert the shape of each element in mlvl_tensor from (N, C, H, W) to - (N, H*W , C), then split the element to N elements with shape (H*W, C), and - concat elements in same image of all level along first dimension. - - Args: - mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from - corresponding level. 
Each element is of shape (N, C, H, W) - - Returns: - list[torch.Tensor]: A list that contains N tensors and each tensor is - of shape (num_elements, C) - """ - batch_size = mlvl_tensor[0].size(0) - batch_list = [[] for _ in range(batch_size)] - channels = mlvl_tensor[0].size(1) - for t in mlvl_tensor: - t = t.permute(0, 2, 3, 1) - t = t.view(batch_size, -1, channels).contiguous() - for img in range(batch_size): - batch_list[img].append(t[img]) - return [torch.cat(item, 0) for item in batch_list] - - -@HEADS.register_module() -class YOLOFHead(AnchorHead): - """YOLOFHead Paper link: https://arxiv.org/abs/2103.09460. - - Args: - num_classes (int): The number of object classes (w/o background) - in_channels (List[int]): The number of input channels per scale. - cls_num_convs (int): The number of convolutions of cls branch. - Default 2. - reg_num_convs (int): The number of convolutions of reg branch. - Default 4. - norm_cfg (dict): Dictionary to construct and config norm layer. - """ - - def __init__(self, - num_classes, - in_channels, - num_cls_convs=2, - num_reg_convs=4, - norm_cfg=dict(type='BN', requires_grad=True), - **kwargs): - self.num_cls_convs = num_cls_convs - self.num_reg_convs = num_reg_convs - self.norm_cfg = norm_cfg - super(YOLOFHead, self).__init__(num_classes, in_channels, **kwargs) - - def _init_layers(self): - cls_subnet = [] - bbox_subnet = [] - for i in range(self.num_cls_convs): - cls_subnet.append( - ConvModule( - self.in_channels, - self.in_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg)) - for i in range(self.num_reg_convs): - bbox_subnet.append( - ConvModule( - self.in_channels, - self.in_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg)) - self.cls_subnet = nn.Sequential(*cls_subnet) - self.bbox_subnet = nn.Sequential(*bbox_subnet) - self.cls_score = nn.Conv2d( - self.in_channels, - self.num_anchors * self.num_classes, - kernel_size=3, - stride=1, - padding=1) - self.bbox_pred = nn.Conv2d( - self.in_channels, - self.num_anchors * 4, - kernel_size=3, - stride=1, - padding=1) - self.object_pred = nn.Conv2d( - self.in_channels, - self.num_anchors, - kernel_size=3, - stride=1, - padding=1) - - def init_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - normal_init(m, mean=0, std=0.01) - if is_norm(m): - constant_init(m, 1) - - # Use prior in model initialization to improve stability - bias_cls = bias_init_with_prob(0.01) - torch.nn.init.constant_(self.cls_score.bias, bias_cls) - - def forward_single(self, feature): - cls_score = self.cls_score(self.cls_subnet(feature)) - N, _, H, W = cls_score.shape - cls_score = cls_score.view(N, -1, self.num_classes, H, W) - - reg_feat = self.bbox_subnet(feature) - bbox_reg = self.bbox_pred(reg_feat) - objectness = self.object_pred(reg_feat) - - # implicit objectness - objectness = objectness.view(N, -1, 1, H, W) - normalized_cls_score = cls_score + objectness - torch.log( - 1. + torch.clamp(cls_score.exp(), max=INF) + - torch.clamp(objectness.exp(), max=INF)) - normalized_cls_score = normalized_cls_score.view(N, -1, H, W) - return normalized_cls_score, bbox_reg - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. 
- - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (batch, num_anchors * num_classes, h, w) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (batch, num_anchors * 4, h, w) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. Default: None - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert len(cls_scores) == 1 - assert self.anchor_generator.num_levels == 1 - - device = cls_scores[0].device - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - - # The output level is always 1 - anchor_list = [anchors[0] for anchors in anchor_list] - valid_flag_list = [valid_flags[0] for valid_flags in valid_flag_list] - - cls_scores_list = levels_to_images(cls_scores) - bbox_preds_list = levels_to_images(bbox_preds) - - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - cls_scores_list, - bbox_preds_list, - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - (batch_labels, batch_label_weights, num_total_pos, num_total_neg, - batch_bbox_weights, batch_pos_predicted_boxes, - batch_target_boxes) = cls_reg_targets - - flatten_labels = batch_labels.reshape(-1) - batch_label_weights = batch_label_weights.reshape(-1) - cls_score = cls_scores[0].permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - - num_total_samples = (num_total_pos + - num_total_neg) if self.sampling else num_total_pos - num_total_samples = reduce_mean( - cls_score.new_tensor(num_total_samples)).clamp_(1.0).item() - - # classification loss - loss_cls = self.loss_cls( - cls_score, - flatten_labels, - batch_label_weights, - avg_factor=num_total_samples) - - # regression loss - if batch_pos_predicted_boxes.shape[0] == 0: - # no pos sample - loss_bbox = batch_pos_predicted_boxes.sum() * 0 - else: - loss_bbox = self.loss_bbox( - batch_pos_predicted_boxes, - batch_target_boxes, - batch_bbox_weights.float(), - avg_factor=num_total_samples) - - return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) - - def get_targets(self, - cls_scores_list, - bbox_preds_list, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in - multiple images. - - Args: - cls_scores_list (list[Tensor]): Classification scores of - each image. each is a 4D-tensor, the shape is - (h * w, num_anchors * num_classes). - bbox_preds_list (list[Tensor]): Bbox preds of each image. - each is a 4D-tensor, the shape is (h * w, num_anchors * 4). - anchor_list (list[Tensor]): Anchors of each image. Each element of - is a tensor of shape (h * w * num_anchors, 4). - valid_flag_list (list[Tensor]): Valid flags of each image. Each - element of is a tensor of shape (h * w * num_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. 
- img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be - ignored. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - - batch_labels (Tensor): Label of all images. Each element \ - of is a tensor of shape (batch, h * w * num_anchors) - - batch_label_weights (Tensor): Label weights of all images \ - of is a tensor of shape (batch, h * w * num_anchors) - - num_total_pos (int): Number of positive samples in all \ - images. - - num_total_neg (int): Number of negative samples in all \ - images. - additional_returns: This function enables user-defined returns from - `self._get_targets_single`. These returns are currently refined - to properties at each feature map (i.e. having HxW dimension). - The results will be concatenated after the end - """ - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - results = multi_apply( - self._get_targets_single, - bbox_preds_list, - anchor_list, - valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - (all_labels, all_label_weights, pos_inds_list, neg_inds_list, - sampling_results_list) = results[:5] - rest_results = list(results[5:]) # user-added return values - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - - batch_labels = torch.stack(all_labels, 0) - batch_label_weights = torch.stack(all_label_weights, 0) - - res = (batch_labels, batch_label_weights, num_total_pos, num_total_neg) - for i, rests in enumerate(rest_results): # user-added return values - rest_results[i] = torch.cat(rests, 0) - - return res + tuple(rest_results) - - def _get_targets_single(self, - bbox_preds, - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in a - single image. - - Args: - bbox_preds (Tensor): Bbox prediction of the image, which - shape is (h * w ,4) - flat_anchors (Tensor): Anchors of the image, which shape is - (h * w * num_anchors ,4) - valid_flags (Tensor): Valid flags of the image, which shape is - (h * w * num_anchors,). - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - img_meta (dict): Meta info of the image. - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: - labels (Tensor): Labels of image, which shape is - (h * w * num_anchors, ). - label_weights (Tensor): Label weights of image, which shape is - (h * w * num_anchors, ). - pos_inds (Tensor): Pos index of image. 
- neg_inds (Tensor): Neg index of image. - sampling_result (obj:`SamplingResult`): Sampling result. - pos_bbox_weights (Tensor): The Weight of using to calculate - the bbox branch loss, which shape is (num, ). - pos_predicted_boxes (Tensor): boxes predicted value of - using to calculate the bbox branch loss, which shape is - (num, 4). - pos_target_boxes (Tensor): boxes target value of - using to calculate the bbox branch loss, which shape is - (num, 4). - """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 8 - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - bbox_preds = bbox_preds.reshape(-1, 4) - bbox_preds = bbox_preds[inside_flags, :] - - # decoded bbox - decoder_bbox_preds = self.bbox_coder.decode(anchors, bbox_preds) - assign_result = self.assigner.assign( - decoder_bbox_preds, anchors, gt_bboxes, gt_bboxes_ignore, - None if self.sampling else gt_labels) - - pos_bbox_weights = assign_result.get_extra_property('pos_idx') - pos_predicted_boxes = assign_result.get_extra_property( - 'pos_predicted_boxes') - pos_target_boxes = assign_result.get_extra_property('target_boxes') - - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - num_valid_anchors = anchors.shape[0] - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class since v2.5.0 - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - labels = unmap( - labels, num_total_anchors, inside_flags, - fill=self.num_classes) # fill bg label - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - - return (labels, label_weights, pos_inds, neg_inds, sampling_result, - pos_bbox_weights, pos_predicted_boxes, pos_target_boxes) diff --git a/spaces/tsereno/SportsTrainer/README.md b/spaces/tsereno/SportsTrainer/README.md deleted file mode 100644 index 29e5fa3abcae4aff71ab8b9dc2cda12839961374..0000000000000000000000000000000000000000 --- a/spaces/tsereno/SportsTrainer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SportsTrainer -emoji: 🐠 -colorFrom: indigo -colorTo: indigo -sdk: gradio -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/ulysses115/Nogizaka46-so/inference/infer_tool_grad.py b/spaces/ulysses115/Nogizaka46-so/inference/infer_tool_grad.py deleted file mode 100644 index e1d3b85f4028e1db83da6a24c3050e7a35c34cb2..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/Nogizaka46-so/inference/infer_tool_grad.py +++ /dev/null @@ -1,160 +0,0 @@ -import hashlib -import json -import logging -import os -import time -from pathlib import Path -import io -import librosa -import maad -import numpy as np -from inference import slicer -import parselmouth -import soundfile -import torch -import 
torchaudio - -from hubert import hubert_model -import utils -from models import SynthesizerTrn -logging.getLogger('numba').setLevel(logging.WARNING) -logging.getLogger('matplotlib').setLevel(logging.WARNING) - -def resize2d_f0(x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), - source) - res = np.nan_to_num(target) - return res - -def get_f0(x, p_len,f0_up_key=0): - - time_step = 160 / 16000 * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = parselmouth.Sound(x, 16000).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - - pad_size=(p_len - len(f0) + 1) // 2 - if(pad_size>0 or p_len - len(f0) - pad_size>0): - f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant') - - f0 *= pow(2, f0_up_key / 12) - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0 - -def clean_pitch(input_pitch): - num_nan = np.sum(input_pitch == 1) - if num_nan / len(input_pitch) > 0.9: - input_pitch[input_pitch != 1] = 1 - return input_pitch - - -def plt_pitch(input_pitch): - input_pitch = input_pitch.astype(float) - input_pitch[input_pitch == 1] = np.nan - return input_pitch - - -def f0_to_pitch(ff): - f0_pitch = 69 + 12 * np.log2(ff / 440) - return f0_pitch - - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - - -class VitsSvc(object): - def __init__(self): - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.SVCVITS = None - self.hps = None - self.speakers = None - self.hubert_soft = utils.get_hubert_model() - - def set_device(self, device): - self.device = torch.device(device) - self.hubert_soft.to(self.device) - if self.SVCVITS != None: - self.SVCVITS.to(self.device) - - def loadCheckpoint(self, path): - self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") - self.SVCVITS = SynthesizerTrn( - self.hps.data.filter_length // 2 + 1, - self.hps.train.segment_size // self.hps.data.hop_length, - **self.hps.model) - _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", self.SVCVITS, None) - _ = self.SVCVITS.eval().to(self.device) - self.speakers = self.hps.spk - - def get_units(self, source, sr): - source = source.unsqueeze(0).to(self.device) - with torch.inference_mode(): - units = self.hubert_soft.units(source) - return units - - - def get_unit_pitch(self, in_path, tran): - source, sr = torchaudio.load(in_path) - source = torchaudio.functional.resample(source, sr, 16000) - if len(source.shape) == 2 and source.shape[1] >= 2: - source = torch.mean(source, dim=0).unsqueeze(0) - soft = self.get_units(source, sr).squeeze(0).cpu().numpy() - f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran) - return soft, f0 - - def infer(self, speaker_id, tran, raw_path): - speaker_id = self.speakers[speaker_id] - sid = torch.LongTensor([int(speaker_id)]).to(self.device).unsqueeze(0) - soft, pitch = self.get_unit_pitch(raw_path, tran) - f0 = 
torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.device) - stn_tst = torch.FloatTensor(soft) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0).to(self.device) - x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2) - audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0,0].data.float() - return audio, audio.shape[-1] - - def inference(self,srcaudio,chara,tran,slice_db): - sampling_rate, audio = srcaudio - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - soundfile.write("tmpwav.wav", audio, 16000, format="wav") - chunks = slicer.cut("tmpwav.wav", db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio("tmpwav.wav", chunks) - audio = [] - for (slice_tag, data) in audio_data: - length = int(np.ceil(len(data) / audio_sr * self.hps.data.sampling_rate)) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - raw_path.seek(0) - if slice_tag: - _audio = np.zeros(length) - else: - out_audio, out_sr = self.infer(chara, tran, raw_path) - _audio = out_audio.cpu().numpy() - audio.extend(list(_audio)) - audio = (np.array(audio) * 32768.0).astype('int16') - return (self.hps.data.sampling_rate,audio) \ No newline at end of file diff --git a/spaces/vinthony/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r34.py b/spaces/vinthony/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r34.py deleted file mode 100644 index fda2701758a839a7161d09c25f0ca3d26033baff..0000000000000000000000000000000000000000 --- a/spaces/vinthony/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r34.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "cosface" -config.network = "r34" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/glint360k" -config.num_classes = 360232 -config.num_image = 17091657 -config.num_epoch = 20 -config.warmup_epoch = -1 -config.decay_epoch = [8, 12, 15, 18] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/vinthony/SadTalker/src/face3d/models/arcface_torch/utils/utils_callbacks.py b/spaces/vinthony/SadTalker/src/face3d/models/arcface_torch/utils/utils_callbacks.py deleted file mode 100644 index bd2f56cba47c57de102710ff56eaac591e59f4da..0000000000000000000000000000000000000000 --- a/spaces/vinthony/SadTalker/src/face3d/models/arcface_torch/utils/utils_callbacks.py +++ /dev/null @@ -1,117 +0,0 @@ -import logging -import os -import time -from typing import List - -import torch - -from eval import verification -from utils.utils_logging import AverageMeter - - -class CallBackVerification(object): - def __init__(self, frequent, rank, val_targets, rec_prefix, image_size=(112, 112)): - self.frequent: int = frequent - self.rank: int = rank - self.highest_acc: float = 0.0 - self.highest_acc_list: List[float] = [0.0] * len(val_targets) - self.ver_list: List[object] = [] - self.ver_name_list: List[str] = [] - if self.rank is 0: - self.init_dataset(val_targets=val_targets, data_dir=rec_prefix, image_size=image_size) - - def ver_test(self, backbone: 
torch.nn.Module, global_step: int): - results = [] - for i in range(len(self.ver_list)): - acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test( - self.ver_list[i], backbone, 10, 10) - logging.info('[%s][%d]XNorm: %f' % (self.ver_name_list[i], global_step, xnorm)) - logging.info('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (self.ver_name_list[i], global_step, acc2, std2)) - if acc2 > self.highest_acc_list[i]: - self.highest_acc_list[i] = acc2 - logging.info( - '[%s][%d]Accuracy-Highest: %1.5f' % (self.ver_name_list[i], global_step, self.highest_acc_list[i])) - results.append(acc2) - - def init_dataset(self, val_targets, data_dir, image_size): - for name in val_targets: - path = os.path.join(data_dir, name + ".bin") - if os.path.exists(path): - data_set = verification.load_bin(path, image_size) - self.ver_list.append(data_set) - self.ver_name_list.append(name) - - def __call__(self, num_update, backbone: torch.nn.Module): - if self.rank is 0 and num_update > 0 and num_update % self.frequent == 0: - backbone.eval() - self.ver_test(backbone, num_update) - backbone.train() - - -class CallBackLogging(object): - def __init__(self, frequent, rank, total_step, batch_size, world_size, writer=None): - self.frequent: int = frequent - self.rank: int = rank - self.time_start = time.time() - self.total_step: int = total_step - self.batch_size: int = batch_size - self.world_size: int = world_size - self.writer = writer - - self.init = False - self.tic = 0 - - def __call__(self, - global_step: int, - loss: AverageMeter, - epoch: int, - fp16: bool, - learning_rate: float, - grad_scaler: torch.cuda.amp.GradScaler): - if self.rank == 0 and global_step > 0 and global_step % self.frequent == 0: - if self.init: - try: - speed: float = self.frequent * self.batch_size / (time.time() - self.tic) - speed_total = speed * self.world_size - except ZeroDivisionError: - speed_total = float('inf') - - time_now = (time.time() - self.time_start) / 3600 - time_total = time_now / ((global_step + 1) / self.total_step) - time_for_end = time_total - time_now - if self.writer is not None: - self.writer.add_scalar('time_for_end', time_for_end, global_step) - self.writer.add_scalar('learning_rate', learning_rate, global_step) - self.writer.add_scalar('loss', loss.avg, global_step) - if fp16: - msg = "Speed %.2f samples/sec Loss %.4f LearningRate %.4f Epoch: %d Global Step: %d " \ - "Fp16 Grad Scale: %2.f Required: %1.f hours" % ( - speed_total, loss.avg, learning_rate, epoch, global_step, - grad_scaler.get_scale(), time_for_end - ) - else: - msg = "Speed %.2f samples/sec Loss %.4f LearningRate %.4f Epoch: %d Global Step: %d " \ - "Required: %1.f hours" % ( - speed_total, loss.avg, learning_rate, epoch, global_step, time_for_end - ) - logging.info(msg) - loss.reset() - self.tic = time.time() - else: - self.init = True - self.tic = time.time() - - -class CallBackModelCheckpoint(object): - def __init__(self, rank, output="./"): - self.rank: int = rank - self.output: str = output - - def __call__(self, global_step, backbone, partial_fc, ): - if global_step > 100 and self.rank == 0: - path_module = os.path.join(self.output, "backbone.pth") - torch.save(backbone.module.state_dict(), path_module) - logging.info("Pytorch Model Saved in '{}'".format(path_module)) - - if global_step > 100 and partial_fc is not None: - partial_fc.save_params() diff --git a/spaces/vpsrikanth/FaceSimilarity/app/Hackathon_setup/face_recognition_model.py b/spaces/vpsrikanth/FaceSimilarity/app/Hackathon_setup/face_recognition_model.py deleted file mode 
100644 index 8c7fc10a4ee55b4f89e2eafd052b60fbdaaac96a..0000000000000000000000000000000000000000 --- a/spaces/vpsrikanth/FaceSimilarity/app/Hackathon_setup/face_recognition_model.py +++ /dev/null @@ -1,93 +0,0 @@ -import math -import torch -import torchvision -import torch.nn as nn -import torch.nn.functional as F -from torchvision import transforms -from torch.autograd import Variable -import random -# Add more imports if required - -# Sample Transformation function -# YOUR CODE HERE for changing the Transformation values. -trnscm = transforms.Compose([transforms.Resize((100,100)),transforms.Grayscale(), transforms.ToTensor()]) - -class SiameseNetwork(nn.Module): - def __init__(self): - super(SiameseNetwork, self).__init__() - self.cnn1 = nn.Sequential( - nn.ReflectionPad2d(1), #Pads the input tensor using the reflection of the input boundary, it similar to the padding. - nn.Conv2d(1, 4, kernel_size=3), - nn.ReLU(inplace=True), - nn.BatchNorm2d(4), - - nn.ReflectionPad2d(1), - nn.Conv2d(4, 8, kernel_size=3), - nn.ReLU(inplace=True), - nn.BatchNorm2d(8), - - - nn.ReflectionPad2d(1), - nn.Conv2d(8, 8, kernel_size=3), - nn.ReLU(inplace=True), - nn.BatchNorm2d(8), - ) - - self.fc1 = nn.Sequential( - nn.Linear(8*100*100, 500), - nn.ReLU(inplace=True), - - nn.Linear(500, 500), - nn.ReLU(inplace=True), - - nn.Linear(500, 5)) - - # forward_once is for one image. This can be used while classifying the face images - def forward_once(self, x): - output = self.cnn1(x) - output = output.view(output.size()[0], -1) - output = self.fc1(output) - return output - - def forward(self, input1, input2): - output1 = self.forward_once(input1) - output2 = self.forward_once(input2) - return output1, output2 - -class MLPClassifier(nn.Module): - def __init__(self, input_dim, output_dim): - super(MLPClassifier, self).__init__() - self.fc1 = nn.Linear(input_dim, 64) - self.relu = nn.ReLU() - self.fc2 = nn.Linear(64, 32) - self.relu = nn.ReLU() - self.fc3 = nn.Linear(32, output_dim) - - def forward(self, x): - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.relu(x) - x = self.fc3(x) - # print(x.shape) - return x - -##Example Network -class Siamese(torch.nn.Module): - def __init__(self): - super(Siamese, self).__init__() - #YOUR CODE HERE - - def forward(self, x): - pass # remove 'pass' once you have written your code - #YOUR CODE HERE - -########################################################################################################## -## Sample classification network (Specify if you are using a pytorch classifier during the training) ## -## classifier = nn.Sequential(nn.Linear(64, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Linear...) 
## -########################################################################################################## - -# YOUR CODE HERE for pytorch classifier - -# Definition of classes as dictionary -classes = ['Devopriya','Praveen', 'Richa','Ravi','Srikanth','Tarakveer'] \ No newline at end of file diff --git a/spaces/wangguanlin/vits_Kazari/mandarin.py b/spaces/wangguanlin/vits_Kazari/mandarin.py deleted file mode 100644 index 97a0187388cb7dabd764dae5e18b94081963e6b3..0000000000000000000000000000000000000000 --- a/spaces/wangguanlin/vits_Kazari/mandarin.py +++ /dev/null @@ -1,253 +0,0 @@ -import os -import sys -import re -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba -import cn2an -import logging - -logging.getLogger('jieba').setLevel(logging.WARNING) -jieba.set_dictionary(os.path.dirname(sys.argv[0])+'/jieba/dict.txt') -jieba.initialize() - - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (romaji, ipa) pairs: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ʃy', 'ʃ'), - ('ʧʰy', 'ʧʰ'), - ('ʧ⁼y', 'ʧ⁼'), - ('NN', 'n'), - ('Ng', 'ŋ'), - ('y', 'j'), - ('h', 'x') -]] - -# List of (bopomofo, ipa) pairs: -_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'x'), - ('ㄐ', 'tʃ⁼'), - ('ㄑ', 'tʃʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ts`⁼'), - ('ㄔ', 'ts`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ts⁼'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'ɥæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'ɥn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'əŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', 
'?'), - ('—', '-') -]] - - -def number_to_chinese(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - return text - - -def chinese_to_bopomofo(text): - text = text.replace('、', ',').replace(';', ',').replace(':', ',') - words = jieba.lcut(text, cut_all=False) - text = '' - for word in words: - bopomofos = lazy_pinyin(word, BOPOMOFO) - if not re.search('[\u4e00-\u9fff]', word): - text += word - continue - for i in range(len(bopomofos)): - if re.match('[\u3105-\u3129]', bopomofos[i][-1]): - bopomofos[i] += 'ˉ' - if text != '': - text += ' ' - text += ''.join(bopomofos) - return text - - -def latin_to_bopomofo(text): - for regex, replacement in _latin_to_bopomofo: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_romaji(text): - for regex, replacement in _bopomofo_to_romaji: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa(text): - for regex, replacement in _bopomofo_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_romaji(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_romaji(text) - text = re.sub('i[aoe]', lambda x: 'y'+x.group(0)[1:], text) - text = re.sub('u[aoəe]', lambda x: 'w'+x.group(0)[1:], text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', lambda x: x.group(1) + - 'ɹ`'+x.group(2), text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', - lambda x: x.group(1)+'ɹ'+x.group(2), text) - return text - - -def chinese_to_lazy_ipa(text): - text = chinese_to_romaji(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_ipa(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa(text) - text = re.sub('i[aoe]', lambda x: 'y'+x.group(0)[1:], text) - text = re.sub('u[aoəe]', lambda x: 'w'+x.group(0)[1:], text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', lambda x: x.group(1) + - 'ɹ`'+x.group(2), text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', - lambda x: x.group(1)+'ɹ'+x.group(2), text) - return text diff --git a/spaces/weidacn/deepdanbooru/Dockerfile b/spaces/weidacn/deepdanbooru/Dockerfile deleted file mode 100644 index 34e4d8a401b6bfd6347795c39f70fd56d9ac1bc8..0000000000000000000000000000000000000000 --- a/spaces/weidacn/deepdanbooru/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM tensorflow/tensorflow:2.7.0 -LABEL author="AW" -WORKDIR /app -ADD . /app -EXPOSE 7860 -RUN python3 -m pip install -r requirements.txt -i https://mirrors.tencent.com/pypi/simple -CMD python3 /app/main.py diff --git a/spaces/weidacn/deepdanbooru/deepdanbooru/commands/make_training_database.py b/spaces/weidacn/deepdanbooru/deepdanbooru/commands/make_training_database.py deleted file mode 100644 index 8d2013f34a20ac5d05f6e0dfb70cc654362530cb..0000000000000000000000000000000000000000 --- a/spaces/weidacn/deepdanbooru/deepdanbooru/commands/make_training_database.py +++ /dev/null @@ -1,135 +0,0 @@ -import os -import sqlite3 - - -def make_training_database( - source_path, - output_path, - start_id, - end_id, - use_deleted, - chunk_size, - overwrite, - vacuum, -): - """ - Make sqlite database for training. Also add system tags. 
- """ - if source_path == output_path: - raise Exception("Source path and output path is equal.") - - if os.path.exists(output_path): - if overwrite: - os.remove(output_path) - else: - raise Exception(f"{output_path} is already exists.") - - source_connection = sqlite3.connect(source_path) - source_connection.row_factory = sqlite3.Row - source_cursor = source_connection.cursor() - - output_connection = sqlite3.connect(output_path) - output_connection.row_factory = sqlite3.Row - output_cursor = output_connection.cursor() - - table_name = "posts" - id_column_name = "id" - md5_column_name = "md5" - extension_column_name = "file_ext" - tags_column_name = "tag_string" - tag_count_general_column_name = "tag_count_general" - rating_column_name = "rating" - score_column_name = "score" - deleted_column_name = "is_deleted" - - # Create output table - print("Creating table ...") - output_cursor.execute( - f"""CREATE TABLE {table_name} ( - {id_column_name} INTEGER NOT NULL PRIMARY KEY, - {md5_column_name} TEXT, - {extension_column_name} TEXT, - {tags_column_name} TEXT, - {tag_count_general_column_name} INTEGER )""" - ) - output_connection.commit() - print("Creating table is complete.") - - current_start_id = start_id - - while True: - print(f"Fetching source rows ... ({current_start_id}~)") - source_cursor.execute( - f"""SELECT - {id_column_name},{md5_column_name},{extension_column_name},{tags_column_name},{tag_count_general_column_name},{rating_column_name},{score_column_name},{deleted_column_name} - FROM {table_name} WHERE ({id_column_name} >= ?) ORDER BY {id_column_name} ASC LIMIT ?""", - (current_start_id, chunk_size), - ) - - rows = source_cursor.fetchall() - - if not rows: - break - - insert_params = [] - - for row in rows: - post_id = row[id_column_name] - md5 = row[md5_column_name] - extension = row[extension_column_name] - tags = row[tags_column_name] - general_tag_count = row[tag_count_general_column_name] - rating = row[rating_column_name] - # score = row[score_column_name] - is_deleted = row[deleted_column_name] - - if post_id > end_id: - break - - if is_deleted and not use_deleted: - continue - - if rating == "g": - tags += f" rating:general" - elif rating == "s": - tags += f" rating:sensitive" - elif rating == "q": - tags += f" rating:questionable" - elif rating == "e": - tags += f" rating:explicit" - - # if score < -6: - # tags += f' score:very_bad' - # elif score >= -6 and score < 0: - # tags += f' score:bad' - # elif score >= 0 and score < 7: - # tags += f' score:average' - # elif score >= 7 and score < 13: - # tags += f' score:good' - # elif score >= 13: - # tags += f' score:very_good' - - insert_params.append((post_id, md5, extension, tags, general_tag_count)) - - if insert_params: - print("Inserting ...") - output_cursor.executemany( - f"""INSERT INTO {table_name} ( - {id_column_name},{md5_column_name},{extension_column_name},{tags_column_name},{tag_count_general_column_name}) - values (?, ?, ?, ?, ?)""", - insert_params, - ) - output_connection.commit() - - current_start_id = rows[-1][id_column_name] + 1 - - if current_start_id > end_id or len(rows) < chunk_size: - break - - if vacuum: - print("Vacuum ...") - output_cursor.execute("vacuum") - output_connection.commit() - - source_connection.close() - output_connection.close() diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/actions/project_management.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/actions/project_management.py deleted file mode 100644 index 1062f8984819a022936498fc717329a162d30ea1..0000000000000000000000000000000000000000 
--- a/spaces/wffcyrus/MetaGPT-v1/metagpt/actions/project_management.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/11 19:12 -@Author : alexanderwu -@File : project_management.py -@Modified By: mashenquan, 2023-8-9, align `run` parameters with the parent :class:`Action` class. -""" -from typing import List, Tuple - -import aiofiles - -from metagpt.actions.action import Action -from metagpt.config import CONFIG - -PROMPT_TEMPLATE = """ -# Context -{context} - -## Format example -{format_example} ------ -Role: You are a project manager; the goal is to break down tasks according to PRD/technical design, give a task list, and analyze task dependencies to start with the prerequisite modules -Requirements: Based on the context, fill in the following missing information, note that all sections are returned in Python code triple quote form seperatedly. Here the granularity of the task is a file, if there are any missing files, you can supplement them -Attention: Use '##' to split sections, not '#', and '## ' SHOULD WRITE BEFORE the code and triple quote. - -## Required Python third-party packages: Provided in requirements.txt format - -## Required Other language third-party packages: Provided in requirements.txt format - -## Full API spec: Use OpenAPI 3.0. Describe all APIs that may be used by both frontend and backend. - -## Logic Analysis: Provided as a Python list[str, str]. the first is filename, the second is class/method/function should be implemented in this file. Analyze the dependencies between the files, which work should be done first - -## Task list: Provided as Python list[str]. Each str is a filename, the more at the beginning, the more it is a prerequisite dependency, should be done first - -## Shared Knowledge: Anything that should be public like utils' functions, config's variables details that should make clear first. - -## Anything UNCLEAR: Provide as Plain text. Make clear here. For example, don't forget a main entry. don't forget to init 3rd party libs. - -""" - -FORMAT_EXAMPLE = ''' ---- -## Required Python third-party packages -```python -""" -flask==1.1.2 -bcrypt==3.2.0 -""" -``` - -## Required Other language third-party packages -```python -""" -No third-party ... -""" -``` - -## Full API spec -```python -""" -openapi: 3.0.0 -... -description: A JSON object ... -""" -``` - -## Logic Analysis -```python -[ - ("game.py", "Contains ..."), -] -``` - -## Task list -```python -[ - "game.py", -] -``` - -## Shared Knowledge -```python -""" -'game.py' contains ... -""" -``` - -## Anything UNCLEAR -We need ... how to start. 
---- -''' - -OUTPUT_MAPPING = { - "Required Python third-party packages": (str, ...), - "Required Other language third-party packages": (str, ...), - "Full API spec": (str, ...), - "Logic Analysis": (List[Tuple[str, str]], ...), - "Task list": (List[str], ...), - "Shared Knowledge": (str, ...), - "Anything UNCLEAR": (str, ...), -} - - -class WriteTasks(Action): - def __init__(self, name="CreateTasks", context=None, llm=None): - super().__init__(name, context, llm) - - async def _save(self, rsp): - file_path = CONFIG.workspace / "docs/api_spec_and_tasks.md" - async with aiofiles.open(file_path, "w") as f: - await f.write(rsp.content) - - # Write requirements.txt - requirements_path = CONFIG.workspace / "requirements.txt" - - async with aiofiles.open(requirements_path, "w") as f: - await f.write(rsp.instruct_content.dict().get("Required Python third-party packages").strip('"\n')) - - async def run(self, context, **kwargs): - prompt = PROMPT_TEMPLATE.format(context=context, format_example=FORMAT_EXAMPLE) - rsp = await self._aask_v1(prompt, "task", OUTPUT_MAPPING) - await self._save(rsp) - return rsp - - -class AssignTasks(Action): - async def run(self, *args, **kwargs): - # Here you should implement the actual action - pass diff --git a/spaces/wz758727829/ChuanhuChatGPT/ChuanhuChatbot.py b/spaces/wz758727829/ChuanhuChatGPT/ChuanhuChatbot.py deleted file mode 100644 index 086dc6a1e3da91f4078e163ffac03ab54ed0a7d0..0000000000000000000000000000000000000000 --- a/spaces/wz758727829/ChuanhuChatGPT/ChuanhuChatbot.py +++ /dev/null @@ -1,159 +0,0 @@ -import gradio as gr -# import openai -import os -import sys -import argparse -from utils import * -from presets import * - - -my_api_key = "" # 在这里输入你的 API 密钥 - -#if we are running in Docker -if os.environ.get('dockerrun') == 'yes': - dockerflag = True -else: - dockerflag = False - -authflag = False - -if dockerflag: - my_api_key = os.environ.get('my_api_key') - if my_api_key == "empty": - print("Please give a api key!") - sys.exit(1) - #auth - username = os.environ.get('USERNAME') - password = os.environ.get('PASSWORD') - if not (isinstance(username, type(None)) or isinstance(password, type(None))): - authflag = True -else: - if not my_api_key and os.path.exists("api_key.txt") and os.path.getsize("api_key.txt"): - with open("api_key.txt", "r") as f: - my_api_key = f.read().strip() - if os.path.exists("auth.json"): - with open("auth.json", "r") as f: - auth = json.load(f) - username = auth["username"] - password = auth["password"] - if username != "" and password != "": - authflag = True - -gr.Chatbot.postprocess = postprocess - -with gr.Blocks(css=customCSS) as demo: - gr.HTML(title) - with gr.Row(): - keyTxt = gr.Textbox(show_label=False, placeholder=f"在这里输入你的OpenAI API-key...", - value=my_api_key, type="password", visible=not HIDE_MY_KEY).style(container=True) - use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option) - chatbot = gr.Chatbot() # .style(color_map=("#1D51EE", "#585A5B")) - history = gr.State([]) - token_count = gr.State([]) - promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - TRUECOMSTANT = gr.State(True) - FALSECONSTANT = gr.State(False) - topic = gr.State("未命名对话历史记录") - - with gr.Row(): - with gr.Column(scale=12): - user_input = gr.Textbox(show_label=False, placeholder="在这里输入").style( - container=False) - with gr.Column(min_width=50, scale=1): - submitBtn = gr.Button("🚀", variant="primary") - with gr.Row(): - emptyBtn = gr.Button("🧹 新的对话") - retryBtn = 
gr.Button("🔄 重新生成") - delLastBtn = gr.Button("🗑️ 删除最近一条对话") - reduceTokenBtn = gr.Button("♻️ 总结对话") - status_display = gr.Markdown("status: ready") - systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...", - label="System prompt", value=initial_prompt).style(container=True) - with gr.Accordion(label="加载Prompt模板", open=False): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件", choices=get_template_names(plain=True), multiselect=False, value=get_template_names(plain=True)[0]) - with gr.Column(scale=1): - templateRefreshBtn = gr.Button("🔄 刷新") - templaeFileReadBtn = gr.Button("📂 读入模板") - with gr.Row(): - with gr.Column(scale=6): - templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=load_template(get_template_names(plain=True)[0], mode=1), multiselect=False, value=load_template(get_template_names(plain=True)[0], mode=1)[0]) - with gr.Column(scale=1): - templateApplyBtn = gr.Button("⬇️ 应用") - with gr.Accordion(label="保存/加载对话历史记录", open=False): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button("💾 保存对话") - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False, value=get_history_names(plain=True)[0]) - with gr.Column(scale=1): - historyRefreshBtn = gr.Button("🔄 刷新") - historyReadBtn = gr.Button("📂 读入对话") - #inputs, top_p, temperature, top_k, repetition_penalty - with gr.Accordion("参数", open=False): - top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05, - interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, - step=0.1, interactive=True, label="Temperature",) - #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",) - #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", ) - gr.Markdown(description) - - - user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True) - user_input.submit(reset_textbox, [], [user_input]) - - submitBtn.click(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True) - submitBtn.click(reset_textbox, [], [user_input]) - - emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display], show_progress=True) - - retryBtn.click(retry, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True) - - delLastBtn.click(delete_last_conversation, [chatbot, history, token_count, use_streaming_checkbox], [ - chatbot, history, token_count, status_display], show_progress=True) - - reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True) - - saveHistoryBtn.click(save_chat_history, [ - saveFileName, systemPromptTxt, history, 
chatbot], None, show_progress=True) - - saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown]) - - historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown]) - - historyReadBtn.click(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True) - - templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown]) - - templaeFileReadBtn.click(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True) - - templateApplyBtn.click(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True) - -print("川虎的温馨提示:访问 http://localhost:7860 查看界面") -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = "川虎ChatGPT 🚀" - -if __name__ == "__main__": - #if running in Docker - if dockerflag: - if authflag: - demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(username, password)) - else: - demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False) - #if not running in Docker - else: - if authflag: - demo.queue().launch(share=False, auth=(username, password)) - else: - demo.queue().launch(share=False) # 改为 share=True 可以创建公开分享链接 - #demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口 - #demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码 - #demo.queue().launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理 diff --git a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/models/networks/architecture.py b/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/models/networks/architecture.py deleted file mode 100644 index 91eb91c8c9fd6500d191456bb3dd8b39d491bb5a..0000000000000000000000000000000000000000 --- a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/models/networks/architecture.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision -import torch.nn.utils.spectral_norm as spectral_norm -from models.networks.normalization import SPADE - - -# ResNet block that uses SPADE. -# It differs from the ResNet block of pix2pixHD in that -# it takes in the segmentation map as input, learns the skip connection if necessary, -# and applies normalization first and then convolution. -# This architecture seemed like a standard architecture for unconditional or -# class-conditional GAN architecture using residual block. -# The code was inspired from https://github.com/LMescheder/GAN_stability. 
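The block comment above describes the key design choices of this resblock: the normalization layers are conditioned on the segmentation map (and, in this variant, also on the degraded image), and a 1x1 learned shortcut is created only when the input and output channel counts differ. For orientation, here is a minimal, hypothetical sketch of how resblocks of this kind are typically chained inside a SPADE-style decoder, with upsampling between blocks. It assumes the SPADEResnetBlock defined immediately below; the ToyDecoder name, the channel sizes, and the tanh output head are illustrative assumptions, not something defined in this file.

# Hypothetical usage sketch (illustrative only): chaining the SPADEResnetBlock
# defined below inside a small decoder. The ToyDecoder name and channel sizes
# are assumptions made for this example, not part of the original code.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyDecoder(nn.Module):
    def __init__(self, opt):
        super().__init__()
        # Each block re-injects the conditioning maps via SPADE normalization.
        self.head = SPADEResnetBlock(256, 256, opt)  # channel count kept, no learned shortcut
        self.mid = SPADEResnetBlock(256, 128, opt)   # channel count changes, 1x1 shortcut is learned
        self.tail = SPADEResnetBlock(128, 64, opt)
        self.to_rgb = nn.Conv2d(64, 3, kernel_size=3, padding=1)

    def forward(self, x, seg, degraded_image):
        x = self.head(x, seg, degraded_image)
        x = F.interpolate(x, scale_factor=2, mode="nearest")  # upsample between blocks
        x = self.mid(x, seg, degraded_image)
        x = F.interpolate(x, scale_factor=2, mode="nearest")
        x = self.tail(x, seg, degraded_image)
        return torch.tanh(self.to_rgb(F.leaky_relu(x, 2e-1)))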
-class SPADEResnetBlock(nn.Module): - def __init__(self, fin, fout, opt): - super().__init__() - # Attributes - self.learned_shortcut = fin != fout - fmiddle = min(fin, fout) - - self.opt = opt - # create conv layers - self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1) - self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1) - if self.learned_shortcut: - self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False) - - # apply spectral norm if specified - if "spectral" in opt.norm_G: - self.conv_0 = spectral_norm(self.conv_0) - self.conv_1 = spectral_norm(self.conv_1) - if self.learned_shortcut: - self.conv_s = spectral_norm(self.conv_s) - - # define normalization layers - spade_config_str = opt.norm_G.replace("spectral", "") - self.norm_0 = SPADE(spade_config_str, fin, opt.semantic_nc, opt) - self.norm_1 = SPADE(spade_config_str, fmiddle, opt.semantic_nc, opt) - if self.learned_shortcut: - self.norm_s = SPADE(spade_config_str, fin, opt.semantic_nc, opt) - - # note the resnet block with SPADE also takes in |seg|, - # the semantic segmentation map as input - def forward(self, x, seg, degraded_image): - x_s = self.shortcut(x, seg, degraded_image) - - dx = self.conv_0(self.actvn(self.norm_0(x, seg, degraded_image))) - dx = self.conv_1(self.actvn(self.norm_1(dx, seg, degraded_image))) - - out = x_s + dx - - return out - - def shortcut(self, x, seg, degraded_image): - if self.learned_shortcut: - x_s = self.conv_s(self.norm_s(x, seg, degraded_image)) - else: - x_s = x - return x_s - - def actvn(self, x): - return F.leaky_relu(x, 2e-1) - - -# ResNet block used in pix2pixHD -# We keep the same architecture as pix2pixHD. -class ResnetBlock(nn.Module): - def __init__(self, dim, norm_layer, activation=nn.ReLU(False), kernel_size=3): - super().__init__() - - pw = (kernel_size - 1) // 2 - self.conv_block = nn.Sequential( - nn.ReflectionPad2d(pw), - norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)), - activation, - nn.ReflectionPad2d(pw), - norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)), - ) - - def forward(self, x): - y = self.conv_block(x) - out = x + y - return out - - -# VGG architecter, used for the perceptual loss using a pretrained VGG network -class VGG19(torch.nn.Module): - def __init__(self, requires_grad=False): - super().__init__() - vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - for x in range(2): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(2, 7): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(7, 12): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(12, 21): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(21, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h_relu1 = self.slice1(X) - h_relu2 = self.slice2(h_relu1) - h_relu3 = self.slice3(h_relu2) - h_relu4 = self.slice4(h_relu3) - h_relu5 = self.slice5(h_relu4) - out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] - return out - - -class SPADEResnetBlock_non_spade(nn.Module): - def __init__(self, fin, fout, opt): - super().__init__() - # Attributes - self.learned_shortcut = fin != fout - fmiddle = min(fin, fout) - - self.opt = opt 
- # create conv layers - self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1) - self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1) - if self.learned_shortcut: - self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False) - - # apply spectral norm if specified - if "spectral" in opt.norm_G: - self.conv_0 = spectral_norm(self.conv_0) - self.conv_1 = spectral_norm(self.conv_1) - if self.learned_shortcut: - self.conv_s = spectral_norm(self.conv_s) - - # define normalization layers - spade_config_str = opt.norm_G.replace("spectral", "") - self.norm_0 = SPADE(spade_config_str, fin, opt.semantic_nc, opt) - self.norm_1 = SPADE(spade_config_str, fmiddle, opt.semantic_nc, opt) - if self.learned_shortcut: - self.norm_s = SPADE(spade_config_str, fin, opt.semantic_nc, opt) - - # note the resnet block with SPADE also takes in |seg|, - # the semantic segmentation map as input - def forward(self, x, seg, degraded_image): - x_s = self.shortcut(x, seg, degraded_image) - - dx = self.conv_0(self.actvn(x)) - dx = self.conv_1(self.actvn(dx)) - - out = x_s + dx - - return out - - def shortcut(self, x, seg, degraded_image): - if self.learned_shortcut: - x_s = self.conv_s(x) - else: - x_s = x - return x_s - - def actvn(self, x): - return F.leaky_relu(x, 2e-1) diff --git a/spaces/xswu/HPSv2/src/open_clip/generation_utils.py b/spaces/xswu/HPSv2/src/open_clip/generation_utils.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/xuetao/bingo3/src/components/ui/dropdown-menu.tsx b/spaces/xuetao/bingo3/src/components/ui/dropdown-menu.tsx deleted file mode 100644 index 184d4e6007ef85187446362f69532ab077897fea..0000000000000000000000000000000000000000 --- a/spaces/xuetao/bingo3/src/components/ui/dropdown-menu.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' -import * as DropdownMenuPrimitive from '@radix-ui/react-dropdown-menu' - -import { cn } from '@/lib/utils' - -const DropdownMenu = DropdownMenuPrimitive.Root - -const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger - -const DropdownMenuGroup = DropdownMenuPrimitive.Group - -const DropdownMenuPortal = DropdownMenuPrimitive.Portal - -const DropdownMenuSub = DropdownMenuPrimitive.Sub - -const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup - -const DropdownMenuSubContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DropdownMenuSubContent.displayName = - DropdownMenuPrimitive.SubContent.displayName - -const DropdownMenuContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, sideOffset = 4, ...props }, ref) => ( - - - -)) -DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName - -const DropdownMenuItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( - -)) -DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName - -const DropdownMenuLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( - -)) -DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName - -const DropdownMenuSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DropdownMenuSeparator.displayName = 
DropdownMenuPrimitive.Separator.displayName - -const DropdownMenuShortcut = ({ - className, - ...props -}: React.HTMLAttributes) => { - return ( - - ) -} -DropdownMenuShortcut.displayName = 'DropdownMenuShortcut' - -export { - DropdownMenu, - DropdownMenuTrigger, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuLabel, - DropdownMenuSeparator, - DropdownMenuShortcut, - DropdownMenuGroup, - DropdownMenuPortal, - DropdownMenuSub, - DropdownMenuSubContent, - DropdownMenuRadioGroup -} diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/PianoRoll/CanvasPianoRuler.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/PianoRoll/CanvasPianoRuler.tsx deleted file mode 100644 index afd426b644af8b14e9ab10bfbab677d010e83d5f..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/PianoRoll/CanvasPianoRuler.tsx +++ /dev/null @@ -1,284 +0,0 @@ -import { findLast, isEqual } from "lodash" -import { observer } from "mobx-react-lite" -import React, { FC, MouseEventHandler, useCallback, useState } from "react" -import { BeatWithX } from "../../../common/helpers/mapBeats" -import { LoopSetting } from "../../../common/player" -import { Theme } from "../../../common/theme/Theme" -import { setLoopBegin, setLoopEnd, updateTimeSignature } from "../../actions" -import { Layout } from "../../Constants" -import { useContextMenu } from "../../hooks/useContextMenu" -import { useStores } from "../../hooks/useStores" -import { useTheme } from "../../hooks/useTheme" -import { RulerStore, TimeSignature } from "../../stores/RulerStore" -import DrawCanvas from "../DrawCanvas" -import { RulerContextMenu } from "./RulerContextMenu" -import { TimeSignatureDialog } from "./TimeSignatureDialog" - -const textPadding = 2 - -function drawRuler( - ctx: CanvasRenderingContext2D, - height: number, - beats: BeatWithX[], - theme: Theme, -) { - ctx.strokeStyle = theme.secondaryTextColor - ctx.lineWidth = 1 - ctx.beginPath() - - // 密過ぎる時は省略する - // Omit when it is too high - const shouldOmit = beats.length > 1 && beats[1].x - beats[0].x <= 5 - - beats.forEach(({ beat, measure, x }) => { - const isTop = beat === 0 - - if (isTop) { - ctx.moveTo(x, height / 2) - ctx.lineTo(x, height) - } else if (!shouldOmit) { - ctx.moveTo(x, height * 0.8) - ctx.lineTo(x, height) - } - - // 小節番号 - // War Number - // 省略時は2つに1つ描画 - // Default 1 drawing one for two - if (isTop && (!shouldOmit || measure % 2 === 0)) { - ctx.textBaseline = "top" - ctx.font = `12px ${theme.canvasFont}` - ctx.fillStyle = theme.secondaryTextColor - ctx.fillText(`${measure}`, x + textPadding, textPadding) - } - }) - - ctx.closePath() - ctx.stroke() -} - -function drawLoopPoints( - ctx: CanvasRenderingContext2D, - loop: LoopSetting, - height: number, - pixelsPerTick: number, - theme: Theme, -) { - const flagSize = 8 - ctx.lineWidth = 1 - ctx.fillStyle = loop.enabled ? theme.themeColor : theme.secondaryTextColor - ctx.strokeStyle = loop.enabled ? 
theme.themeColor : theme.secondaryTextColor - ctx.beginPath() - - const beginX = loop.begin * pixelsPerTick - const endX = loop.end * pixelsPerTick - - if (loop.begin !== null) { - const x = beginX - ctx.moveTo(x, 0) - ctx.lineTo(x, height) - - ctx.moveTo(x, 0) - ctx.lineTo(x + flagSize, 0) - ctx.lineTo(x, flagSize) - } - - if (loop.end !== null) { - const x = endX - ctx.moveTo(x, 0) - ctx.lineTo(x, height) - - ctx.moveTo(x, 0) - ctx.lineTo(x - flagSize, 0) - ctx.lineTo(x, flagSize) - } - - ctx.closePath() - ctx.fill() - ctx.stroke() -} - -function drawFlag( - ctx: CanvasRenderingContext2D, - x: number, - y: number, - width: number, - height: number, - flagSize: number, -) { - ctx.beginPath() - ctx.moveTo(x, y) - ctx.lineTo(x + width + flagSize, y) - ctx.lineTo(x + width, y + height) - ctx.lineTo(x, y + height) - ctx.lineTo(x, y) - ctx.closePath() - ctx.fill() -} - -function drawTimeSignatures( - ctx: CanvasRenderingContext2D, - height: number, - events: TimeSignature[], - pixelsPerTick: number, - theme: Theme, -) { - ctx.textBaseline = "bottom" - ctx.font = `11px ${theme.canvasFont}` - events.forEach((e) => { - const x = e.tick * pixelsPerTick - const text = `${e.numerator}/${e.denominator}` - const size = ctx.measureText(text) - const textHeight = - size.actualBoundingBoxAscent + size.actualBoundingBoxDescent - ctx.fillStyle = e.isSelected - ? theme.themeColor - : theme.secondaryBackgroundColor - const flagHeight = textHeight + textPadding * 4 - drawFlag( - ctx, - x, - height - flagHeight, - size.width + textPadding * 2, - flagHeight, - textHeight, - ) - ctx.fillStyle = theme.textColor - ctx.fillText(text, x + textPadding, height - textPadding) - }) -} - -export interface PianoRulerProps { - rulerStore: RulerStore - style?: React.CSSProperties -} - -// null = closed -interface TimeSignatureDialogState { - numerator: number - denominator: number -} - -const TIME_SIGNATURE_HIT_WIDTH = 20 - -const PianoRuler: FC = observer(({ rulerStore, style }) => { - const rootStore = useStores() - const theme = useTheme() - const { onContextMenu, menuProps } = useContextMenu() - const [timeSignatureDialogState, setTimeSignatureDialogState] = - useState(null) - const [rightClickTick, setRightClickTick] = useState(0) - const height = Layout.rulerHeight - - const { - canvasWidth: width, - transform: { pixelsPerTick }, - scrollLeft, - } = rulerStore.parent - const { beats, timeSignatures, quantizer } = rulerStore - const { - player, - player: { loop }, - } = rootStore - - const timeSignatureHitTest = (tick: number) => { - const widthTick = TIME_SIGNATURE_HIT_WIDTH / pixelsPerTick - return findLast( - timeSignatures, - (e) => e.tick < tick && e.tick + widthTick >= tick, - ) - } - - const onMouseDown: React.MouseEventHandler = useCallback( - (e) => { - const tick = rulerStore.getTick(e.nativeEvent.offsetX) - const quantizedTick = quantizer.round(tick) - const timeSignature = timeSignatureHitTest(tick) - - if (e.nativeEvent.ctrlKey) { - setLoopBegin(rootStore)(quantizedTick) - } else if (e.nativeEvent.altKey) { - setLoopEnd(rootStore)(quantizedTick) - } else { - if (timeSignature !== undefined) { - if (e.detail == 2) { - setTimeSignatureDialogState(timeSignature) - } else { - rulerStore.selectedTimeSignatureEventIds = [timeSignature.id] - } - } else { - rulerStore.selectedTimeSignatureEventIds = [] - player.position = quantizedTick - } - } - }, - [rootStore, quantizer, player, scrollLeft, pixelsPerTick, timeSignatures], - ) - - const draw = useCallback( - (ctx: CanvasRenderingContext2D) => { - ctx.clearRect(0, 
0, width, height) - ctx.save() - ctx.translate(-scrollLeft + 0.5, 0) - drawRuler(ctx, height, beats, theme) - if (loop !== null) { - drawLoopPoints(ctx, loop, height, pixelsPerTick, theme) - } - drawTimeSignatures(ctx, height, timeSignatures, pixelsPerTick, theme) - ctx.restore() - }, - [width, pixelsPerTick, scrollLeft, beats, timeSignatures, loop], - ) - - const closeOpenTimeSignatureDialog = useCallback(() => { - setTimeSignatureDialogState(null) - }, []) - - const okTimeSignatureDialog = useCallback( - ({ numerator, denominator }: TimeSignatureDialogState) => { - rulerStore.selectedTimeSignatureEventIds.forEach((id) => { - updateTimeSignature(rootStore)(id, numerator, denominator) - }) - }, - [], - ) - - const onContextMenuWrapper: MouseEventHandler = useCallback( - (e) => { - setRightClickTick(rulerStore.getQuantizedTick(e.nativeEvent.offsetX)) - onContextMenu(e) - }, - [rulerStore], - ) - - return ( - <> - - - - - ) -}) - -function equals(props: PianoRulerProps, nextProps: PianoRulerProps) { - return isEqual(props.style, nextProps.style) -} - -export default React.memo(PianoRuler, equals) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/models/GroundingDINO/ms_deform_attn.py b/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/models/GroundingDINO/ms_deform_attn.py deleted file mode 100644 index 489d501bef364020212306d81e9b85c8daa27491..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/models/GroundingDINO/ms_deform_attn.py +++ /dev/null @@ -1,413 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------------------ -# Modified from: -# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/functions/ms_deform_attn_func.py -# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py -# https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/multi_scale_deform_attn.py -# ------------------------------------------------------------------------------------------------ - -import math -import warnings -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.init import constant_, xavier_uniform_ - -try: - from groundingdino import _C -except: - warnings.warn("Failed to load custom C++ ops. 
Running on CPU mode Only!") - - -# helpers -def _is_power_of_2(n): - if (not isinstance(n, int)) or (n < 0): - raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n))) - return (n & (n - 1) == 0) and n != 0 - - -class MultiScaleDeformableAttnFunction(Function): - @staticmethod - def forward( - ctx, - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - im2col_step, - ): - ctx.im2col_step = im2col_step - output = _C.ms_deform_attn_forward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - ctx.im2col_step, - ) - ctx.save_for_backward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - ) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - ( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - ) = ctx.saved_tensors - grad_value, grad_sampling_loc, grad_attn_weight = _C.ms_deform_attn_backward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - grad_output, - ctx.im2col_step, - ) - - return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None - - -def multi_scale_deformable_attn_pytorch( - value: torch.Tensor, - value_spatial_shapes: torch.Tensor, - sampling_locations: torch.Tensor, - attention_weights: torch.Tensor, -) -> torch.Tensor: - - bs, _, num_heads, embed_dims = value.shape - _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape - value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1) - sampling_grids = 2 * sampling_locations - 1 - sampling_value_list = [] - for level, (H_, W_) in enumerate(value_spatial_shapes): - # bs, H_*W_, num_heads, embed_dims -> - # bs, H_*W_, num_heads*embed_dims -> - # bs, num_heads*embed_dims, H_*W_ -> - # bs*num_heads, embed_dims, H_, W_ - value_l_ = ( - value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_) - ) - # bs, num_queries, num_heads, num_points, 2 -> - # bs, num_heads, num_queries, num_points, 2 -> - # bs*num_heads, num_queries, num_points, 2 - sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1) - # bs*num_heads, embed_dims, num_queries, num_points - sampling_value_l_ = F.grid_sample( - value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False - ) - sampling_value_list.append(sampling_value_l_) - # (bs, num_queries, num_heads, num_levels, num_points) -> - # (bs, num_heads, num_queries, num_levels, num_points) -> - # (bs, num_heads, 1, num_queries, num_levels*num_points) - attention_weights = attention_weights.transpose(1, 2).reshape( - bs * num_heads, 1, num_queries, num_levels * num_points - ) - output = ( - (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) - .sum(-1) - .view(bs, num_heads * embed_dims, num_queries) - ) - return output.transpose(1, 2).contiguous() - - -class MultiScaleDeformableAttention(nn.Module): - """Multi-Scale Deformable Attention Module used in Deformable-DETR - - `Deformable DETR: Deformable Transformers for End-to-End Object Detection. - `_. - - Args: - embed_dim (int): The embedding dimension of Attention. Default: 256. - num_heads (int): The number of attention heads. Default: 8. - num_levels (int): The number of feature map used in Attention. Default: 4. 
- num_points (int): The number of sampling points for each query - in each head. Default: 4. - img2col_steps (int): The step used in image_to_column. Defualt: 64. - dropout (float): Dropout layer used in output. Default: 0.1. - batch_first (bool): if ``True``, then the input and output tensor will be - provided as `(bs, n, embed_dim)`. Default: False. `(n, bs, embed_dim)` - """ - - def __init__( - self, - embed_dim: int = 256, - num_heads: int = 8, - num_levels: int = 4, - num_points: int = 4, - img2col_step: int = 64, - batch_first: bool = False, - ): - super().__init__() - if embed_dim % num_heads != 0: - raise ValueError( - "embed_dim must be divisible by num_heads, but got {} and {}".format( - embed_dim, num_heads - ) - ) - head_dim = embed_dim // num_heads - - self.batch_first = batch_first - - if not _is_power_of_2(head_dim): - warnings.warn( - """ - You'd better set d_model in MSDeformAttn to make sure that - each dim of the attention head a power of 2, which is more efficient. - """ - ) - - self.im2col_step = img2col_step - self.embed_dim = embed_dim - self.num_heads = num_heads - self.num_levels = num_levels - self.num_points = num_points - self.sampling_offsets = nn.Linear(embed_dim, num_heads * num_levels * num_points * 2) - self.attention_weights = nn.Linear(embed_dim, num_heads * num_levels * num_points) - self.value_proj = nn.Linear(embed_dim, embed_dim) - self.output_proj = nn.Linear(embed_dim, embed_dim) - - self.init_weights() - - def _reset_parameters(self): - return self.init_weights() - - def init_weights(self): - """ - Default initialization for Parameters of Module. - """ - constant_(self.sampling_offsets.weight.data, 0.0) - thetas = torch.arange(self.num_heads, dtype=torch.float32) * ( - 2.0 * math.pi / self.num_heads - ) - grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) - grid_init = ( - (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) - .view(self.num_heads, 1, 1, 2) - .repeat(1, self.num_levels, self.num_points, 1) - ) - for i in range(self.num_points): - grid_init[:, :, i, :] *= i + 1 - with torch.no_grad(): - self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) - constant_(self.attention_weights.weight.data, 0.0) - constant_(self.attention_weights.bias.data, 0.0) - xavier_uniform_(self.value_proj.weight.data) - constant_(self.value_proj.bias.data, 0.0) - xavier_uniform_(self.output_proj.weight.data) - constant_(self.output_proj.bias.data, 0.0) - - def freeze_sampling_offsets(self): - print("Freeze sampling offsets") - self.sampling_offsets.weight.requires_grad = False - self.sampling_offsets.bias.requires_grad = False - - def freeze_attention_weights(self): - print("Freeze attention weights") - self.attention_weights.weight.requires_grad = False - self.attention_weights.bias.requires_grad = False - - def forward( - self, - query: torch.Tensor, - key: Optional[torch.Tensor] = None, - value: Optional[torch.Tensor] = None, - query_pos: Optional[torch.Tensor] = None, - key_padding_mask: Optional[torch.Tensor] = None, - reference_points: Optional[torch.Tensor] = None, - spatial_shapes: Optional[torch.Tensor] = None, - level_start_index: Optional[torch.Tensor] = None, - **kwargs - ) -> torch.Tensor: - - """Forward Function of MultiScaleDeformableAttention - - Args: - query (torch.Tensor): Query embeddings with shape - `(num_query, bs, embed_dim)` - key (torch.Tensor): Key embeddings with shape - `(num_key, bs, embed_dim)` - value (torch.Tensor): Value embeddings with shape - `(num_key, bs, embed_dim)` - query_pos (torch.Tensor): The position 
embedding for `query`. Default: None. - key_padding_mask (torch.Tensor): ByteTensor for `query`, with shape `(bs, num_key)`, - indicating which elements within `key` to be ignored in attention. - reference_points (torch.Tensor): The normalized reference points - with shape `(bs, num_query, num_levels, 2)`, - all elements is range in [0, 1], top-left (0, 0), - bottom-right (1, 1), including padding are. - or `(N, Length_{query}, num_levels, 4)`, add additional - two dimensions `(h, w)` to form reference boxes. - spatial_shapes (torch.Tensor): Spatial shape of features in different levels. - With shape `(num_levels, 2)`, last dimension represents `(h, w)`. - level_start_index (torch.Tensor): The start index of each level. A tensor with - shape `(num_levels, )` which can be represented as - `[0, h_0 * w_0, h_0 * w_0 + h_1 * w_1, ...]`. - - Returns: - torch.Tensor: forward results with shape `(num_query, bs, embed_dim)` - """ - - if value is None: - value = query - - if query_pos is not None: - query = query + query_pos - - if not self.batch_first: - # change to (bs, num_query ,embed_dims) - query = query.permute(1, 0, 2) - value = value.permute(1, 0, 2) - - bs, num_query, _ = query.shape - bs, num_value, _ = value.shape - - assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value - - value = self.value_proj(value) - if key_padding_mask is not None: - value = value.masked_fill(key_padding_mask[..., None], float(0)) - value = value.view(bs, num_value, self.num_heads, -1) - sampling_offsets = self.sampling_offsets(query).view( - bs, num_query, self.num_heads, self.num_levels, self.num_points, 2 - ) - attention_weights = self.attention_weights(query).view( - bs, num_query, self.num_heads, self.num_levels * self.num_points - ) - attention_weights = attention_weights.softmax(-1) - attention_weights = attention_weights.view( - bs, - num_query, - self.num_heads, - self.num_levels, - self.num_points, - ) - - # bs, num_query, num_heads, num_levels, num_points, 2 - if reference_points.shape[-1] == 2: - offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) - sampling_locations = ( - reference_points[:, :, None, :, None, :] - + sampling_offsets / offset_normalizer[None, None, None, :, None, :] - ) - elif reference_points.shape[-1] == 4: - sampling_locations = ( - reference_points[:, :, None, :, None, :2] - + sampling_offsets - / self.num_points - * reference_points[:, :, None, :, None, 2:] - * 0.5 - ) - else: - raise ValueError( - "Last dim of reference_points must be 2 or 4, but get {} instead.".format( - reference_points.shape[-1] - ) - ) - - if torch.cuda.is_available() and value.is_cuda: - halffloat = False - if value.dtype == torch.float16: - halffloat = True - value = value.float() - sampling_locations = sampling_locations.float() - attention_weights = attention_weights.float() - - output = MultiScaleDeformableAttnFunction.apply( - value, - spatial_shapes, - level_start_index, - sampling_locations, - attention_weights, - self.im2col_step, - ) - - if halffloat: - output = output.half() - else: - output = multi_scale_deformable_attn_pytorch( - value, spatial_shapes, sampling_locations, attention_weights - ) - - output = self.output_proj(output) - - if not self.batch_first: - output = output.permute(1, 0, 2) - - return output - - -def create_dummy_class(klass, dependency, message=""): - """ - When a dependency of a class is not available, create a dummy class which throws ImportError - when used. - - Args: - klass (str): name of the class. 
- dependency (str): name of the dependency. - message: extra message to print - Returns: - class: a class object - """ - err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass) - if message: - err = err + " " + message - - class _DummyMetaClass(type): - # throw error on class attribute access - def __getattr__(_, __): # noqa: B902 - raise ImportError(err) - - class _Dummy(object, metaclass=_DummyMetaClass): - # throw error on constructor - def __init__(self, *args, **kwargs): - raise ImportError(err) - - return _Dummy - - -def create_dummy_func(func, dependency, message=""): - """ - When a dependency of a function is not available, create a dummy function which throws - ImportError when used. - - Args: - func (str): name of the function. - dependency (str or list[str]): name(s) of the dependency. - message: extra message to print - Returns: - function: a function object - """ - err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func) - if message: - err = err + " " + message - - if isinstance(dependency, (list, tuple)): - dependency = ",".join(dependency) - - def _dummy(*args, **kwargs): - raise ImportError(err) - - return _dummy diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/onnx_export.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/onnx_export.py deleted file mode 100644 index a70a912cc1b6dd908ff6496bbc6fa8dd576e233b..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/onnx_export.py +++ /dev/null @@ -1,54 +0,0 @@ -import torch -from onnxexport.model_onnx import SynthesizerTrn -import utils - -def main(NetExport): - path = "SoVits4.0" - if NetExport: - device = torch.device("cpu") - hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") - SVCVITS = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model) - _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", SVCVITS, None) - _ = SVCVITS.eval().to(device) - for i in SVCVITS.parameters(): - i.requires_grad = False - - n_frame = 10 - test_hidden_unit = torch.rand(1, n_frame, 256) - test_pitch = torch.rand(1, n_frame) - test_mel2ph = torch.arange(0, n_frame, dtype=torch.int64)[None] # torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).unsqueeze(0) - test_uv = torch.ones(1, n_frame, dtype=torch.float32) - test_noise = torch.randn(1, 192, n_frame) - test_sid = torch.LongTensor([0]) - input_names = ["c", "f0", "mel2ph", "uv", "noise", "sid"] - output_names = ["audio", ] - - torch.onnx.export(SVCVITS, - ( - test_hidden_unit.to(device), - test_pitch.to(device), - test_mel2ph.to(device), - test_uv.to(device), - test_noise.to(device), - test_sid.to(device) - ), - f"checkpoints/{path}/model.onnx", - dynamic_axes={ - "c": [0, 1], - "f0": [1], - "mel2ph": [1], - "uv": [1], - "noise": [2], - }, - do_constant_folding=False, - opset_version=16, - verbose=False, - input_names=input_names, - output_names=output_names) - - -if __name__ == '__main__': - main(True) diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/inference/infer_tool.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/inference/infer_tool.py deleted file mode 100644 index 442342c95210668c0ff1eda3e3d45f4750e0fbe6..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/inference/infer_tool.py +++ /dev/null @@ -1,529 +0,0 @@ -import hashlib -import io -import json -import logging -import os -import time -from pathlib import Path -from inference import slicer -import gc - 
-import librosa -import numpy as np -# import onnxruntime -import soundfile -import torch -import torchaudio - -import cluster -import utils -from models import SynthesizerTrn -import pickle - -from diffusion.unit2mel import load_model_vocoder -import yaml - -logging.getLogger('matplotlib').setLevel(logging.WARNING) - - -def read_temp(file_name): - if not os.path.exists(file_name): - with open(file_name, "w") as f: - f.write(json.dumps({"info": "temp_dict"})) - return {} - else: - try: - with open(file_name, "r") as f: - data = f.read() - data_dict = json.loads(data) - if os.path.getsize(file_name) > 50 * 1024 * 1024: - f_name = file_name.replace("\\", "/").split("/")[-1] - print(f"clean {f_name}") - for wav_hash in list(data_dict.keys()): - if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600: - del data_dict[wav_hash] - except Exception as e: - print(e) - print(f"{file_name} error,auto rebuild file") - data_dict = {"info": "temp_dict"} - return data_dict - - -def write_temp(file_name, data): - with open(file_name, "w") as f: - f.write(json.dumps(data)) - - -def timeit(func): - def run(*args, **kwargs): - t = time.time() - res = func(*args, **kwargs) - print('executing \'%s\' costed %.3fs' % (func.__name__, time.time() - t)) - return res - - return run - - -def format_wav(audio_path): - if Path(audio_path).suffix == '.wav': - return - raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None) - soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate) - - -def get_end_file(dir_path, end): - file_lists = [] - for root, dirs, files in os.walk(dir_path): - files = [f for f in files if f[0] != '.'] - dirs[:] = [d for d in dirs if d[0] != '.'] - for f_file in files: - if f_file.endswith(end): - file_lists.append(os.path.join(root, f_file).replace("\\", "/")) - return file_lists - - -def get_md5(content): - return hashlib.new("md5", content).hexdigest() - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - -def pad_array(arr, target_length): - current_length = arr.shape[0] - if current_length >= target_length: - return arr - else: - pad_width = target_length - current_length - pad_left = pad_width // 2 - pad_right = pad_width - pad_left - padded_arr = np.pad(arr, (pad_left, pad_right), 'constant', constant_values=(0, 0)) - return padded_arr - -def split_list_by_n(list_collection, n, pre=0): - for i in range(0, len(list_collection), n): - yield list_collection[i-pre if i-pre>=0 else i: i + n] - - -class F0FilterException(Exception): - pass - -class Svc(object): - def __init__(self, net_g_path, config_path, - device=None, - cluster_model_path="logs/44k/kmeans_10000.pt", - nsf_hifigan_enhance = False, - diffusion_model_path="logs/44k/diffusion/model_0.pt", - diffusion_config_path="configs/diffusion.yaml", - shallow_diffusion = False, - only_diffusion = False, - spk_mix_enable = False, - feature_retrieval = False - ): - self.net_g_path = net_g_path - self.only_diffusion = only_diffusion - self.shallow_diffusion = shallow_diffusion - self.feature_retrieval = feature_retrieval - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - self.net_g_ms = None - if not self.only_diffusion: - self.hps_ms = utils.get_hparams_from_file(config_path,True) - self.target_sample = self.hps_ms.data.sampling_rate - self.hop_size = 
self.hps_ms.data.hop_length - self.spk2id = self.hps_ms.spk - self.unit_interpolate_mode = self.hps_ms.data.unit_interpolate_mode if self.hps_ms.data.unit_interpolate_mode is not None else 'left' - self.vol_embedding = self.hps_ms.model.vol_embedding if self.hps_ms.model.vol_embedding is not None else False - self.speech_encoder = self.hps_ms.model.speech_encoder if self.hps_ms.model.speech_encoder is not None else 'vec768l12' - - self.nsf_hifigan_enhance = nsf_hifigan_enhance - if self.shallow_diffusion or self.only_diffusion: - if os.path.exists(diffusion_model_path) and os.path.exists(diffusion_model_path): - self.diffusion_model,self.vocoder,self.diffusion_args = load_model_vocoder(diffusion_model_path,self.dev,config_path=diffusion_config_path) - if self.only_diffusion: - self.target_sample = self.diffusion_args.data.sampling_rate - self.hop_size = self.diffusion_args.data.block_size - self.spk2id = self.diffusion_args.spk - self.speech_encoder = self.diffusion_args.data.encoder - self.unit_interpolate_mode = self.diffusion_args.data.unit_interpolate_mode if self.diffusion_args.data.unit_interpolate_mode!=None else 'left' - if spk_mix_enable: - self.diffusion_model.init_spkmix(len(self.spk2id)) - else: - print("No diffusion model or config found. Shallow diffusion mode will False") - self.shallow_diffusion = self.only_diffusion = False - - # load hubert and model - if not self.only_diffusion: - self.load_model(spk_mix_enable) - self.hubert_model = utils.get_speech_encoder(self.speech_encoder,device=self.dev) - self.volume_extractor = utils.Volume_Extractor(self.hop_size) - else: - self.hubert_model = utils.get_speech_encoder(self.diffusion_args.data.encoder,device=self.dev) - self.volume_extractor = utils.Volume_Extractor(self.diffusion_args.data.block_size) - - if os.path.exists(cluster_model_path): - if self.feature_retrieval: - with open(cluster_model_path,"rb") as f: - self.cluster_model = pickle.load(f) - self.big_npy = None - self.now_spk_id = -1 - else: - self.cluster_model = cluster.get_cluster_model(cluster_model_path) - else: - self.feature_retrieval=False - - if self.shallow_diffusion : self.nsf_hifigan_enhance = False - if self.nsf_hifigan_enhance: - from modules.enhancer import Enhancer - self.enhancer = Enhancer('nsf-hifigan', 'pretrain/nsf_hifigan/model',device=self.dev) - - def load_model(self, spk_mix_enable=False): - # get model configuration - self.net_g_ms = SynthesizerTrn( - self.hps_ms.data.filter_length // 2 + 1, - self.hps_ms.train.segment_size // self.hps_ms.data.hop_length, - **self.hps_ms.model) - _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None) - if "half" in self.net_g_path and torch.cuda.is_available(): - _ = self.net_g_ms.half().eval().to(self.dev) - else: - _ = self.net_g_ms.eval().to(self.dev) - if spk_mix_enable: - self.net_g_ms.EnableCharacterMix(len(self.spk2id), self.dev) - - def get_unit_f0(self, wav, tran, cluster_infer_ratio, speaker, f0_filter ,f0_predictor,cr_threshold=0.05): - - f0_predictor_object = utils.get_f0_predictor(f0_predictor,hop_length=self.hop_size,sampling_rate=self.target_sample,device=self.dev,threshold=cr_threshold) - - f0, uv = f0_predictor_object.compute_f0_uv(wav) - if f0_filter and sum(f0) == 0: - raise F0FilterException("No voice detected") - f0 = torch.FloatTensor(f0).to(self.dev) - uv = torch.FloatTensor(uv).to(self.dev) - - f0 = f0 * 2 ** (tran / 12) - f0 = f0.unsqueeze(0) - uv = uv.unsqueeze(0) - - wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000) - wav16k = 
torch.from_numpy(wav16k).to(self.dev) - c = self.hubert_model.encoder(wav16k) - c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1],self.unit_interpolate_mode) - - if cluster_infer_ratio !=0: - if self.feature_retrieval: - speaker_id = self.spk2id.get(speaker) - if speaker_id is None: - raise RuntimeError("The name you entered is not in the speaker list!") - if not speaker_id and type(speaker) is int: - if len(self.spk2id.__dict__) >= speaker: - speaker_id = speaker - feature_index = self.cluster_model[speaker_id] - feat_np = c.transpose(0,1).cpu().numpy() - if self.big_npy is None or self.now_spk_id != speaker_id: - self.big_npy = feature_index.reconstruct_n(0, feature_index.ntotal) - self.now_spk_id = speaker_id - print("starting feature retrieval...") - score, ix = feature_index.search(feat_np, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - c = cluster_infer_ratio * npy + (1 - cluster_infer_ratio) * feat_np - c = torch.FloatTensor(c).to(self.dev).transpose(0,1) - print("end feature retrieval...") - else: - cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T - cluster_c = torch.FloatTensor(cluster_c).to(self.dev) - c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c - - c = c.unsqueeze(0) - return c, f0, uv - - def infer(self, speaker, tran, raw_path, - cluster_infer_ratio=0, - auto_predict_f0=False, - noice_scale=0.4, - f0_filter=False, - f0_predictor='pm', - enhancer_adaptive_key = 0, - cr_threshold = 0.05, - k_step = 100, - frame = 0, - spk_mix = False, - second_encoding = False, - loudness_envelope_adjustment = 1 - ): - wav, sr = librosa.load(raw_path, sr=self.target_sample) - if spk_mix: - c, f0, uv = self.get_unit_f0(wav, tran, 0, None, f0_filter,f0_predictor,cr_threshold=cr_threshold) - n_frames = f0.size(1) - sid = speaker[:, frame:frame+n_frames].transpose(0,1) - else: - speaker_id = self.spk2id.get(speaker) - if not speaker_id and type(speaker) is int: - if len(self.spk2id.__dict__) >= speaker: - speaker_id = speaker - if speaker_id is None: - raise RuntimeError("The name you entered is not in the speaker list!") - sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0) - c, f0, uv = self.get_unit_f0(wav, tran, cluster_infer_ratio, speaker, f0_filter,f0_predictor,cr_threshold=cr_threshold) - n_frames = f0.size(1) - if "half" in self.net_g_path and torch.cuda.is_available(): - c = c.half() - with torch.no_grad(): - start = time.time() - vol = None - if not self.only_diffusion: - vol = self.volume_extractor.extract(torch.FloatTensor(wav).to(self.dev)[None,:])[None,:].to(self.dev) if self.vol_embedding else None - audio,f0 = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale,vol=vol) - audio = audio[0,0].data.float() - audio_mel = self.vocoder.extract(audio[None,:],self.target_sample) if self.shallow_diffusion else None - else: - audio = torch.FloatTensor(wav).to(self.dev) - audio_mel = None - if self.only_diffusion or self.shallow_diffusion: - vol = self.volume_extractor.extract(audio[None,:])[None,:,None].to(self.dev) if vol==None else vol[:,:,None] - if self.shallow_diffusion and second_encoding: - audio16k = librosa.resample(audio.detach().cpu().numpy(), orig_sr=self.target_sample, target_sr=16000) - audio16k = torch.from_numpy(audio16k).to(self.dev) - c = self.hubert_model.encoder(audio16k) - c = utils.repeat_expand_2d(c.squeeze(0), 
f0.shape[1],self.unit_interpolate_mode) - f0 = f0[:,:,None] - c = c.transpose(-1,-2) - audio_mel = self.diffusion_model( - c, - f0, - vol, - spk_id = sid, - spk_mix_dict = None, - gt_spec=audio_mel, - infer=True, - infer_speedup=self.diffusion_args.infer.speedup, - method=self.diffusion_args.infer.method, - k_step=k_step) - audio = self.vocoder.infer(audio_mel, f0).squeeze() - if self.nsf_hifigan_enhance: - audio, _ = self.enhancer.enhance( - audio[None,:], - self.target_sample, - f0[:,:,None], - self.hps_ms.data.hop_length, - adaptive_key = enhancer_adaptive_key) - if loudness_envelope_adjustment != 1: - audio = utils.change_rms(wav,self.target_sample,audio,self.target_sample,loudness_envelope_adjustment) - use_time = time.time() - start - print("vits use time:{}".format(use_time)) - return audio, audio.shape[-1], n_frames - - def clear_empty(self): - # clean up vram - torch.cuda.empty_cache() - - def unload_model(self): - # unload model - self.net_g_ms = self.net_g_ms.to("cpu") - del self.net_g_ms - if hasattr(self,"enhancer"): - self.enhancer.enhancer = self.enhancer.enhancer.to("cpu") - del self.enhancer.enhancer - del self.enhancer - gc.collect() - - def slice_inference(self, - raw_audio_path, - spk, - tran, - slice_db, - cluster_infer_ratio, - auto_predict_f0, - noice_scale, - pad_seconds=0.5, - clip_seconds=0, - lg_num=0, - lgr_num =0.75, - f0_predictor='pm', - enhancer_adaptive_key = 0, - cr_threshold = 0.05, - k_step = 100, - use_spk_mix = False, - second_encoding = False, - loudness_envelope_adjustment = 1 - ): - if use_spk_mix: - if len(self.spk2id) == 1: - spk = self.spk2id.keys()[0] - use_spk_mix = False - wav_path = Path(raw_audio_path).with_suffix('.wav') - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - per_size = int(clip_seconds*audio_sr) - lg_size = int(lg_num*audio_sr) - lg_size_r = int(lg_size*lgr_num) - lg_size_c_l = (lg_size-lg_size_r)//2 - lg_size_c_r = lg_size-lg_size_r-lg_size_c_l - lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0 - - if use_spk_mix: - assert len(self.spk2id) == len(spk) - audio_length = 0 - for (slice_tag, data) in audio_data: - aud_length = int(np.ceil(len(data) / audio_sr * self.target_sample)) - if slice_tag: - audio_length += aud_length // self.hop_size - continue - if per_size != 0: - datas = split_list_by_n(data, per_size,lg_size) - else: - datas = [data] - for k,dat in enumerate(datas): - pad_len = int(audio_sr * pad_seconds) - per_length = int(np.ceil(len(dat) / audio_sr * self.target_sample)) - a_length = per_length + 2 * pad_len - audio_length += a_length // self.hop_size - audio_length += len(audio_data) - spk_mix_tensor = torch.zeros(size=(len(spk), audio_length)).to(self.dev) - for i in range(len(spk)): - last_end = None - for mix in spk[i]: - if mix[3]<0. 
or mix[2]<0.: - raise RuntimeError("mix value must higer Than zero!") - begin = int(audio_length * mix[0]) - end = int(audio_length * mix[1]) - length = end - begin - if length<=0: - raise RuntimeError("begin Must lower Than end!") - step = (mix[3] - mix[2])/length - if last_end is not None: - if last_end != begin: - raise RuntimeError("[i]EndTime Must Equal [i+1]BeginTime!") - last_end = end - if step == 0.: - spk_mix_data = torch.zeros(length).to(self.dev) + mix[2] - else: - spk_mix_data = torch.arange(mix[2],mix[3],step).to(self.dev) - if(len(spk_mix_data)>> construct_direction("cat2dog") -""" -def construct_direction(task_name): - if task_name=="cat2dog": - emb_dir = f"assets/embeddings_sd_1.4" - embs_a = torch.load(os.path.join(emb_dir, f"cat.pt")) - embs_b = torch.load(os.path.join(emb_dir, f"dog.pt")) - return (embs_b.mean(0)-embs_a.mean(0)).unsqueeze(0) - elif task_name=="dog2cat": - emb_dir = f"assets/embeddings_sd_1.4" - embs_a = torch.load(os.path.join(emb_dir, f"dog.pt")) - embs_b = torch.load(os.path.join(emb_dir, f"cat.pt")) - return (embs_b.mean(0)-embs_a.mean(0)).unsqueeze(0) - else: - raise NotImplementedError diff --git a/spaces/yuntian-deng/ChatGPT4/app.py b/spaces/yuntian-deng/ChatGPT4/app.py deleted file mode 100644 index 51a04907e72f860c6bf535f66bf1896fad65e974..0000000000000000000000000000000000000000 --- a/spaces/yuntian-deng/ChatGPT4/app.py +++ /dev/null @@ -1,196 +0,0 @@ -import gradio as gr -import os -import sys -import json -import requests - -MODEL = "gpt-4" -API_URL = os.getenv("API_URL") -DISABLED = os.getenv("DISABLED") == 'True' -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") -NUM_THREADS = int(os.getenv("NUM_THREADS")) - -print (NUM_THREADS) - -def exception_handler(exception_type, exception, traceback): - print("%s: %s" % (exception_type.__name__, exception)) -sys.excepthook = exception_handler -sys.tracebacklimit = 0 - -#https://github.com/gradio-app/gradio/issues/3531#issuecomment-1484029099 -def parse_codeblock(text): - lines = text.split("\n") - for i, line in enumerate(lines): - if "```" in line: - if line != "```": - lines[i] = f'
<pre><code class="{lines[i][3:]}">'
              -            else:
-                lines[i] = '</code></pre>
              ' - else: - if i > 0: - lines[i] = "
              " + line.replace("<", "<").replace(">", ">") - return "".join(lines) - -def predict(inputs, top_p, temperature, chat_counter, chatbot, history, request:gr.Request): - payload = { - "model": MODEL, - "messages": [{"role": "user", "content": f"{inputs}"}], - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}", - "Headers": f"{request.kwargs['headers']}" - } - - # print(f"chat_counter - {chat_counter}") - if chat_counter != 0 : - messages = [] - for i, data in enumerate(history): - if i % 2 == 0: - role = 'user' - else: - role = 'assistant' - message = {} - message["role"] = role - message["content"] = data - messages.append(message) - - message = {} - message["role"] = "user" - message["content"] = inputs - messages.append(message) - payload = { - "model": MODEL, - "messages": messages, - "temperature" : temperature, - "top_p": top_p, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - chat_counter += 1 - - history.append(inputs) - token_counter = 0 - partial_words = "" - counter = 0 - - try: - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - response_code = f"{response}" - #if response_code.strip() != "": - # #print(f"response code - {response}") - # raise Exception(f"Sorry, hitting rate limit. Please try again later. {response}") - - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter += 1 - continue - #counter+=1 - # check whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - token_counter += 1 - yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=False), gr.update(interactive=False) # resembles {chatbot: chat, state: history} - except Exception as e: - print (f'error found: {e}') - yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=True), gr.update(interactive=True) - print(json.dumps({"chat_counter": chat_counter, "payload": payload, "partial_words": partial_words, "token_counter": token_counter, "counter": counter})) - - -def reset_textbox(): - return gr.update(value='', interactive=False), gr.update(interactive=False) - -title = """

              GPT4 Chatbot

              """ -if DISABLED: - title = """

              This app has reached OpenAI's usage limit. We are currently requesting an increase in our quota. Please check back in a few days.

              """ -description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form: -``` -User: -Assistant: -User: -Assistant: -... -``` -In this app, you can explore the outputs of a gpt-4 LLM. -""" - -theme = gr.themes.Default(primary_hue="green") - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} - #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - #gr.HTML("""

              This app provides you full access to GPT4 (4096 token limit). You don't need any OPENAI API key.

              """) - gr.HTML("""

              If this app is too busy, consider trying our GPT-3.5 app, which has a much shorter queue time. Visit it below:
              https://huggingface.co/spaces/yuntian-deng/ChatGPT

              """) - - #gr.HTML('''
              Duplicate SpaceDuplicate the Space and run securely with your OpenAI API Key
              ''') - with gr.Column(elem_id = "col_container", visible=False) as main_block: - #GPT4 API Key is provided by Huggingface - #openai_api_key = gr.Textbox(type='password', label="Enter only your GPT4 OpenAI API key here") - chatbot = gr.Chatbot(elem_id='chatbot') #c - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t - state = gr.State([]) #s - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button(visible=not DISABLED).style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #inputs, top_p, temperature, top_k, repetition_penalty - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",) - #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", ) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - with gr.Column(elem_id = "user_consent_container") as user_consent_block: - # Get user consent - accept_checkbox = gr.Checkbox(visible=False) - js = "(x) => confirm('By clicking \"OK\", I agree that my data may be published or shared.')" - with gr.Accordion("User Consent for Data Collection, Use, and Sharing", open=True): - gr.HTML(""" -
              -

              By using our app, which is powered by OpenAI's API, you acknowledge and agree to the following terms regarding the data you provide:

              -
                -
1. Collection: We may collect information, including the inputs you type into our app, the outputs generated by OpenAI's API, and certain technical details about your device and connection (such as browser type, operating system, and IP address) provided by your device's request headers.
2. Use: We may use the collected data for research purposes, to improve our services, and to develop new products or services, including commercial applications, and for security purposes, such as protecting against unauthorized access and attacks.
3. Sharing and Publication: Your data, including the technical details collected from your device's request headers, may be published, shared with third parties, or used for analysis and reporting purposes.
4. Data Retention: We may retain your data, including the technical details collected from your device's request headers, for as long as necessary.
              -

              By continuing to use our app, you provide your explicit consent to the collection, use, and potential sharing of your data as described above. If you do not agree with our data collection, use, and sharing practices, please do not use our app.

              -
              - """) - accept_button = gr.Button("I Agree") - - def enable_inputs(): - return user_consent_block.update(visible=False), main_block.update(visible=True) - - accept_button.click(None, None, accept_checkbox, _js=js, queue=False) - accept_checkbox.change(fn=enable_inputs, inputs=[], outputs=[user_consent_block, main_block], queue=False) - - inputs.submit(reset_textbox, [], [inputs, b1], queue=False) - inputs.submit(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key - b1.click(reset_textbox, [], [inputs, b1], queue=False) - b1.click(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key - - demo.queue(max_size=20, concurrency_count=NUM_THREADS, api_open=False).launch(share=False) \ No newline at end of file diff --git a/spaces/zhenwusw/JoJoGAN/e4e/utils/alignment.py b/spaces/zhenwusw/JoJoGAN/e4e/utils/alignment.py deleted file mode 100644 index a02798f0f7c9fdcc319f7884a491b9e6580cc8aa..0000000000000000000000000000000000000000 --- a/spaces/zhenwusw/JoJoGAN/e4e/utils/alignment.py +++ /dev/null @@ -1,115 +0,0 @@ -import numpy as np -import PIL -import PIL.Image -import scipy -import scipy.ndimage -import dlib - - -def get_landmark(filepath, predictor): - """get landmark with dlib - :return: np.array shape=(68, 2) - """ - detector = dlib.get_frontal_face_detector() - - img = dlib.load_rgb_image(filepath) - dets = detector(img, 1) - - for k, d in enumerate(dets): - shape = predictor(img, d) - - t = list(shape.parts()) - a = [] - for tt in t: - a.append([tt.x, tt.y]) - lm = np.array(a) - return lm - - -def align_face(filepath, predictor): - """ - :param filepath: str - :return: PIL Image - """ - - lm = get_landmark(filepath, predictor) - - lm_chin = lm[0: 17] # left-right - lm_eyebrow_left = lm[17: 22] # left-right - lm_eyebrow_right = lm[22: 27] # left-right - lm_nose = lm[27: 31] # top-down - lm_nostrils = lm[31: 36] # top-down - lm_eye_left = lm[36: 42] # left-clockwise - lm_eye_right = lm[42: 48] # left-clockwise - lm_mouth_outer = lm[48: 60] # left-clockwise - lm_mouth_inner = lm[60: 68] # left-clockwise - - # Calculate auxiliary vectors. - eye_left = np.mean(lm_eye_left, axis=0) - eye_right = np.mean(lm_eye_right, axis=0) - eye_avg = (eye_left + eye_right) * 0.5 - eye_to_eye = eye_right - eye_left - mouth_left = lm_mouth_outer[0] - mouth_right = lm_mouth_outer[6] - mouth_avg = (mouth_left + mouth_right) * 0.5 - eye_to_mouth = mouth_avg - eye_avg - - # Choose oriented crop rectangle. - x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] - x /= np.hypot(*x) - x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) - y = np.flipud(x) * [-1, 1] - c = eye_avg + eye_to_mouth * 0.1 - quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) - qsize = np.hypot(*x) * 2 - - # read image - img = PIL.Image.open(filepath) - - output_size = 256 - transform_size = 256 - enable_padding = True - - # Shrink. - shrink = int(np.floor(qsize / output_size * 0.5)) - if shrink > 1: - rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink))) - img = img.resize(rsize, PIL.Image.ANTIALIAS) - quad /= shrink - qsize /= shrink - - # Crop. 
- border = max(int(np.rint(qsize * 0.1)), 3) - crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), - min(crop[3] + border, img.size[1])) - if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: - img = img.crop(crop) - quad -= crop[0:2] - - # Pad. - pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), - max(pad[3] - img.size[1] + border, 0)) - if enable_padding and max(pad) > border - 4: - pad = np.maximum(pad, int(np.rint(qsize * 0.3))) - img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') - h, w, _ = img.shape - y, x, _ = np.ogrid[:h, :w, :1] - mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]), - 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3])) - blur = qsize * 0.02 - img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) - img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0) - img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB') - quad += pad[:2] - - # Transform. - img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR) - if output_size < transform_size: - img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS) - - # Return aligned image. - return img diff --git a/spaces/zibb/frontalface-cascade/app.py b/spaces/zibb/frontalface-cascade/app.py deleted file mode 100644 index 5ea08277644d82f12d65f53b7976ec993d37640b..0000000000000000000000000000000000000000 --- a/spaces/zibb/frontalface-cascade/app.py +++ /dev/null @@ -1,42 +0,0 @@ -import cv2 -import gradio as gr - -description = "사진을 첨부하면 사진 속 얼굴의 위치를 표시합니다." -title = "Face Detection" - -def predict(img): - # cascade xml 파일 선택 - face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') - - # img = cv2.imread('face.jpg') - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - - faces = face_cascade.detectMultiScale(gray, 1.1, 4) - - # Draw the rectangle around each face - if len(faces): - for (x, y, w, h) in faces: - face_rectangle = cv2.rectangle( - img, (x, y), (x + w, y + h), (255, 0, 0), 2) - - Face_text = cv2.putText(img=face_rectangle, - text="Face", - org=(x, y + h + 30), - fontFace=cv2.FONT_HERSHEY_SIMPLEX, - fontScale=1, color=(0, 0, 255), - thickness=2, lineType=cv2.LINE_4) - - # cv2.imshow("image", img) - # cv2.waitKey(0) - - return img - -iface = gr.Interface( - fn=predict, - inputs='image', - outputs='image', - description=description, - title=title, -) - -iface.launch(share=False) \ No newline at end of file diff --git a/spaces/ziguo/Real-ESRGAN/FAQ.md b/spaces/ziguo/Real-ESRGAN/FAQ.md deleted file mode 100644 index caa8c08cfe4302eb8812c823569e8a0be30fa49c..0000000000000000000000000000000000000000 --- a/spaces/ziguo/Real-ESRGAN/FAQ.md +++ /dev/null @@ -1,9 +0,0 @@ -# FAQ - -1. **What is the difference of `--netscale` and `outscale`?** - -A: TODO. - -1. **How to select models?** - -A: TODO. 
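For orientation, the zibb/frontalface-cascade Space deleted above exposes its whole OpenCV Haar-cascade pipeline through a single `predict(img)` function. A minimal sketch of calling that function directly, outside the Gradio interface, might look like the following; it is illustrative only, the image paths are placeholders, and it assumes `haarcascade_frontalface_default.xml` sits next to the script, as the app expects.

```python
# Illustrative sketch (assumed filenames, not part of any deleted file):
# call the Space's predict() directly on a local image and save the result.
import cv2

img = cv2.imread("example.jpg")          # placeholder path to any test photo
if img is None:
    raise FileNotFoundError("example.jpg could not be read")

annotated = predict(img)                 # draws a rectangle and a "Face" label per detected face
cv2.imwrite("example_faces.jpg", annotated)
```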
diff --git a/spaces/zomehwh/sovits-goldship/onnxexport/model_onnx.py b/spaces/zomehwh/sovits-goldship/onnxexport/model_onnx.py deleted file mode 100644 index e28bae95ec1e53aa05d06fc784ff86d55f228d60..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-goldship/onnxexport/model_onnx.py +++ /dev/null @@ -1,335 +0,0 @@ -import torch -from torch import nn -from torch.nn import functional as F - -import modules.attentions as attentions -import modules.commons as commons -import modules.modules as modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -import utils -from modules.commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - kernel_size, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.gin_channels = gin_channels - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_mask, f0=None, z=None): - x = x + self.f0_emb(f0).transpose(1, 2) - x = self.enc_(x * x_mask, x_mask) - 
stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + z * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class F0Decoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=0): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.spk_channels = spk_channels - - self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1) - self.decoder = attentions.FFT( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.f0_prenet = nn.Conv1d(1, hidden_channels, 3, padding=1) - self.cond = nn.Conv1d(spk_channels, hidden_channels, 1) - - def forward(self, x, norm_f0, x_mask, spk_emb=None): - x = torch.detach(x) - if spk_emb is not None: - x = x + self.cond(spk_emb) - x += self.f0_prenet(norm_f0) - x = self.prenet(x) * x_mask - x = self.decoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def 
__init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - sampling_rate=44100, - **kwargs): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2) - - self.enc_p = TextEncoder( - inter_channels, - hidden_channels, - filter_channels=filter_channels, - n_heads=n_heads, - n_layers=n_layers, - kernel_size=kernel_size, - p_dropout=p_dropout - ) - hps = { - "sampling_rate": sampling_rate, - "inter_channels": inter_channels, - "resblock": resblock, - "resblock_kernel_sizes": resblock_kernel_sizes, - "resblock_dilation_sizes": resblock_dilation_sizes, - "upsample_rates": upsample_rates, - "upsample_initial_channel": upsample_initial_channel, - "upsample_kernel_sizes": upsample_kernel_sizes, - "gin_channels": gin_channels, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - self.f0_decoder = F0Decoder( - 1, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=gin_channels - ) - self.emb_uv = nn.Embedding(2, hidden_channels) - self.predict_f0 = False - - def forward(self, c, f0, mel2ph, uv, noise=None, g=None): - - decoder_inp = F.pad(c, [0, 0, 1, 0]) - mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, c.shape[-1]]) - c = torch.gather(decoder_inp, 1, mel2ph_).transpose(1, 2) # [B, T, H] - - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype) - x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2) - - if self.predict_f0: - lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) 
/ 500 - norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False) - pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g) - f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1) - - z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), z=noise) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g, f0=f0) - return o diff --git a/spaces/zou-code/gorilla-llm-gorilla-7b-hf-delta-v0/app.py b/spaces/zou-code/gorilla-llm-gorilla-7b-hf-delta-v0/app.py deleted file mode 100644 index 2434350406d4b3a4b4e73760f2e4580a86d0e924..0000000000000000000000000000000000000000 --- a/spaces/zou-code/gorilla-llm-gorilla-7b-hf-delta-v0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/gorilla-llm/gorilla-7b-hf-delta-v0").launch() \ No newline at end of file diff --git a/spaces/zxy666/bingo-chatai666/src/components/ui/icons.tsx b/spaces/zxy666/bingo-chatai666/src/components/ui/icons.tsx deleted file mode 100644 index 742b489b50437c5b64c86082f2ebc712eeb6a2b0..0000000000000000000000000000000000000000 --- a/spaces/zxy666/bingo-chatai666/src/components/ui/icons.tsx +++ /dev/null @@ -1,504 +0,0 @@ -'use client' - -import * as React from 'react' - -import { cn } from '@/lib/utils' - -function IconNextChat({ - className, - inverted, - ...props -}: React.ComponentProps<'svg'> & { inverted?: boolean }) { - const id = React.useId() - - return ( - - - - - - - - - - - - - - - - - - - - - - ) -} - -function IconOpenAI({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - OpenAI icon - - - ) -} - -function IconGitHub({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - GitHub - - - ) -} - -function IconSeparator({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - ) -} - -function IconArrowDown({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconArrowRight({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconUser({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconPlus({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconArrowElbow({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconSpinner({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconMessage({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconTrash({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconMore({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconRefresh({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconStop({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconSidebar({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconMoon({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconSun({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconCopy({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconCheck({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconDownload({ className, ...props }: React.ComponentProps<'svg'>) { - return ( 
- - - - ) -} - -function IconClose({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconEdit({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconShare({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconUsers({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconExternalLink({ - className, - ...props -}: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconChevronUpDown({ - className, - ...props -}: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -export { - IconEdit, - IconNextChat, - IconOpenAI, - IconGitHub, - IconSeparator, - IconArrowDown, - IconArrowRight, - IconUser, - IconPlus, - IconArrowElbow, - IconSpinner, - IconMessage, - IconTrash, - IconMore, - IconRefresh, - IconStop, - IconSidebar, - IconMoon, - IconSun, - IconCopy, - IconCheck, - IconDownload, - IconClose, - IconShare, - IconUsers, - IconExternalLink, - IconChevronUpDown -} diff --git a/spaces/zzz666/ChuanhuChatGPT/modules/presets.py b/spaces/zzz666/ChuanhuChatGPT/modules/presets.py deleted file mode 100644 index fcfb53e73e9c5217d312e1a53a7b82c3dbbc82d5..0000000000000000000000000000000000000000 --- a/spaces/zzz666/ChuanhuChatGPT/modules/presets.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- coding:utf-8 -*- -import gradio as gr - -# ChatGPT 设置 -initial_prompt = "You are a helpful assistant." -API_URL = "https://api.openai.com/v1/chat/completions" -BALANCE_API_URL="https://api.openai.com/dashboard/billing/credit_grants" -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -# 错误信息 -standard_error_msg = "☹️发生了错误:" # 错误信息的标准前缀 -error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。" # 获取对话时发生错误 -connection_timeout_prompt = "连接超时,无法获取对话。" # 连接超时 -read_timeout_prompt = "读取超时,无法获取对话。" # 读取超时 -proxy_error_prompt = "代理错误,无法获取对话。" # 代理错误 -ssl_error_prompt = "SSL错误,无法获取对话。" # SSL 错误 -no_apikey_msg = "API key长度不是51位,请检查是否输入正确。" # API key 长度不足 51 位 -no_input_msg = "请输入对话内容。" # 未输入对话内容 - -max_token_streaming = 3500 # 流式对话时的最大 token 数 -timeout_streaming = 10 # 流式对话时的超时时间 -max_token_all = 3500 # 非流式对话时的最大 token 数 -timeout_all = 200 # 非流式对话时的超时时间 -enable_streaming_option = True # 是否启用选择选择是否实时显示回答的勾选框 -HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True -CONCURRENT_COUNT = 100 # 允许同时使用的用户数量 - -SIM_K = 5 -INDEX_QUERY_TEMPRATURE = 1.0 - -title = """

川虎ChatGPT 🚀
""" -description = """\ -
- -由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发 - -访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本 - -此App使用 `gpt-3.5-turbo` 大语言模型 -
              -""" - -summarize_prompt = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt - -MODELS = [ - "gpt-3.5-turbo", - "gpt-3.5-turbo-0301", - "gpt-4", - "gpt-4-0314", - "gpt-4-32k", - "gpt-4-32k-0314", -] # 可选的模型 - -REPLY_LANGUAGES = [ - "中文", - "English", - "日本語", - "Español", - "Français", - "Deutsch", - "跟随问题语言(不稳定)" -] - - -WEBSEARCH_PTOMPT_TEMPLATE = """\ -Web search results: - -{web_results} -Current date: {current_date} - -Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. -Query: {query} -Reply in {reply_language} -""" - -PROMPT_TEMPLATE = """\ -Context information is below. ---------------------- -{context_str} ---------------------- -Current date: {current_date}. -Using the provided context information, write a comprehensive reply to the given query. -Make sure to cite results using [number] notation after the reference. -If the provided context information refer to multiple subjects with the same name, write separate answers for each subject. -Use prior knowledge only if the given context didn't provide enough information. -Answer the question: {query_str} -Reply in {reply_language} -""" - -REFINE_TEMPLATE = """\ -The original question is as follows: {query_str} -We have provided an existing answer: {existing_answer} -We have the opportunity to refine the existing answer -(only if needed) with some more context below. ------------- -{context_msg} ------------- -Given the new context, refine the original answer to better -Reply in {reply_language} -If the context isn't useful, return the original answer. -""" - -ALREADY_CONVERTED_MARK = "" - -small_and_beautiful_theme = gr.themes.Soft( - primary_hue=gr.themes.Color( - c50="#02C160", - c100="rgba(2, 193, 96, 0.2)", - c200="#02C160", - c300="rgba(2, 193, 96, 0.32)", - c400="rgba(2, 193, 96, 0.32)", - c500="rgba(2, 193, 96, 1.0)", - c600="rgba(2, 193, 96, 1.0)", - c700="rgba(2, 193, 96, 0.32)", - c800="rgba(2, 193, 96, 0.32)", - c900="#02C160", - c950="#02C160", - ), - secondary_hue=gr.themes.Color( - c50="#576b95", - c100="#576b95", - c200="#576b95", - c300="#576b95", - c400="#576b95", - c500="#576b95", - c600="#576b95", - c700="#576b95", - c800="#576b95", - c900="#576b95", - c950="#576b95", - ), - neutral_hue=gr.themes.Color( - name="gray", - c50="#f9fafb", - c100="#f3f4f6", - c200="#e5e7eb", - c300="#d1d5db", - c400="#B2B2B2", - c500="#808080", - c600="#636363", - c700="#515151", - c800="#393939", - c900="#272727", - c950="#171717", - ), - radius_size=gr.themes.sizes.radius_sm, - ).set( - button_primary_background_fill="#06AE56", - button_primary_background_fill_dark="#06AE56", - button_primary_background_fill_hover="#07C863", - button_primary_border_color="#06AE56", - button_primary_border_color_dark="#06AE56", - button_primary_text_color="#FFFFFF", - button_primary_text_color_dark="#FFFFFF", - button_secondary_background_fill="#F2F2F2", - button_secondary_background_fill_dark="#2B2B2B", - button_secondary_text_color="#393939", - button_secondary_text_color_dark="#FFFFFF", - # background_fill_primary="#F7F7F7", - # background_fill_primary_dark="#1F1F1F", - block_title_text_color="*primary_500", - block_title_background_fill="*primary_100", - input_background_fill="#F6F6F6", - )