·请不要生成会对个人以及组织造成侵害的内容\n"
- "\n\n"
- "[Open In Colab]"
- "(https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)"
- " without queue and length limitation.(无需等待队列,并且没有长度限制)\n\n"
- "[Finetune your own model](https://github.com/SayaSS/vits-finetuning)"
- )
-
- with gr.Tabs():
- with gr.TabItem("EN"):
- for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
- with gr.TabItem(name_en):
- with gr.Row():
- gr.Markdown(
- '<div align="center">'
- f'<strong>{title}</strong>'
- f'<img src="file/{cover}">' if cover else ""
- '</div>'
- )
- with gr.Row():
- with gr.Column():
- input_text = gr.Textbox(label="Text (100 words limitation)" if limitation else "Text", lines=5, value=example, elem_id=f"input-text-en-{name_en.replace(' ','')}")
- lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mix(wrap the Chinese text with [ZH][ZH], wrap the Japanese text with [JA][JA])"],
- type="index", value=language)
- temp_lang = gr.Variable(value=language)
- with gr.Accordion(label="Advanced Options", open=False):
- temp_text_var = gr.Variable()
- symbol_input = gr.Checkbox(value=False, label="Symbol input")
- symbol_list = gr.Dataset(label="Symbol list", components=[input_text],
- samples=[[x] for x in hps_ms.symbols])
- symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
- btn = gr.Button(value="Generate", variant="primary")
- with gr.Row():
- ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
- nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
- ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
- with gr.Column():
- o1 = gr.Textbox(label="Output Message")
- o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}")
- download = gr.Button("Download Audio")
- btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2])
- download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}"))
- lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls, temp_lang])
- symbol_input.change(
- to_symbol_fn,
- [symbol_input, input_text, temp_text_var, temp_lang],
- [input_text, temp_text_var]
- )
- symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
- _js=f"""
- (i,symbols) => {{
- let root = document.querySelector("body > gradio-app");
- if (root.shadowRoot != null)
- root = root.shadowRoot;
- let text_input = root.querySelector("#input-text-en-{name_en.replace(' ', '')}").querySelector("textarea");
- let startPos = text_input.selectionStart;
- let endPos = text_input.selectionEnd;
- let oldTxt = text_input.value;
- let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
- text_input.value = result;
- let x = window.scrollX, y = window.scrollY;
- text_input.focus();
- text_input.selectionStart = startPos + symbols[i].length;
- text_input.selectionEnd = startPos + symbols[i].length;
- text_input.blur();
- window.scrollTo(x, y);
- return text_input.value;
- }}""")
- with gr.TabItem("中文"):
- for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
- with gr.TabItem(name_zh):
- with gr.Row():
- gr.Markdown(
- '<div align="center">'
- f'<strong>{title}</strong>'
- f'<img src="file/{cover}">' if cover else ""
- '</div>'
- )
- with gr.Row():
- with gr.Column():
- input_text = gr.Textbox(label="文本 (100字上限)" if limitation else "文本", lines=5, value=example, elem_id=f"input-text-zh-{name_zh}")
- lang = gr.Dropdown(label="语言", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"],
- type="index", value="中文" if language == "Chinese" else "日语")
- temp_lang = gr.Variable(value=language)
- with gr.Accordion(label="高级选项", open=False):
- temp_text_var = gr.Variable()
- symbol_input = gr.Checkbox(value=False, label="符号输入")
- symbol_list = gr.Dataset(label="符号列表", components=[input_text],
- samples=[[x] for x in hps_ms.symbols])
- symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
- btn = gr.Button(value="生成", variant="primary")
- with gr.Row():
- ns = gr.Slider(label="控制感情变化程度", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
- nsw = gr.Slider(label="控制音素发音长度", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
- ls = gr.Slider(label="控制整体语速", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
- with gr.Column():
- o1 = gr.Textbox(label="输出信息")
- o2 = gr.Audio(label="输出音频", elem_id=f"tts-audio-zh-{name_zh}")
- download = gr.Button("下载音频")
- btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2])
- download.click(None, [], [], _js=download_audio_js.format(audio_id=f"zh-{name_zh}"))
- lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
- symbol_input.change(
- to_symbol_fn,
- [symbol_input, input_text, temp_text_var, temp_lang],
- [input_text, temp_text_var]
- )
- symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
- _js=f"""
- (i,symbols) => {{
- let root = document.querySelector("body > gradio-app");
- if (root.shadowRoot != null)
- root = root.shadowRoot;
- let text_input = root.querySelector("#input-text-zh-{name_zh}").querySelector("textarea");
- let startPos = text_input.selectionStart;
- let endPos = text_input.selectionEnd;
- let oldTxt = text_input.value;
- let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
- text_input.value = result;
- let x = window.scrollX, y = window.scrollY;
- text_input.focus();
- text_input.selectionStart = startPos + symbols[i].length;
- text_input.selectionEnd = startPos + symbols[i].length;
- text_input.blur();
- window.scrollTo(x, y);
- return text_input.value;
- }}""")
- app.queue(concurrency_count=1).launch(show_api=False, share=args.share)
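Note on the callbacks wired above: `change_lang`, `download_audio_js`, and the per-model `tts_fn`/`to_symbol_fn` functions are defined earlier in this app.py and do not appear in this hunk. Purely as an illustrative sketch under that assumption (not the file's actual implementation), a `change_lang` handler compatible with the English tab's `lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls, temp_lang])` wiring could simply reset the three sliders to language-appropriate defaults and record the chosen language:

def change_lang(language):
    # `language` arrives as an index because the Dropdown uses type="index":
    # 0 = Chinese, 1 = Japanese, 2 = Mix
    if language == 0:
        # Chinese default matches the sliders above: a slightly longer length_scale
        return 0.6, 0.668, 1.2, "Chinese"
    # Japanese and mixed input share the faster default speed in this sketch
    return 0.6, 0.668, 1.0, "Japanese"

The 中文 tab registers only three outputs for the same event, so the real handler presumably returns a shape matching however it is wired there.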
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Code Visual To Flowchart 41 Crack A Must-Have for Software Engineers and Programmers.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Code Visual To Flowchart 41 Crack A Must-Have for Software Engineers and Programmers.md
deleted file mode 100644
index 34853ed3b25d07f6dc3587437d3f13f4a6d57d52..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Code Visual To Flowchart 41 Crack A Must-Have for Software Engineers and Programmers.md
+++ /dev/null
@@ -1,145 +0,0 @@
-
-Code Visual To Flowchart 41 Crack: How to Generate Flowcharts from Code Easily
-Are you a programmer who wants to document your source code or reverse engineer a program? Do you want to create professional-looking flowcharts from your code without spending hours on drawing and editing? If yes, then you need Code Visual To Flowchart 41 Crack, a powerful tool that can automatically generate flowcharts from code in various programming languages. In this article, you will learn what Code Visual To Flowchart is, why you should use it, how to download and install it, how to use it, and what benefits you can get from it. Let's get started!
-Introduction
-What is Code Visual To Flowchart?
-Code Visual To Flowchart is an automatic flow chart generator software that can reverse engineer a program, create programming flowcharts from code, and document source code. It can generate Bmp, Visio, Word, Excel, PowerPoint, and HTML flowcharts documents from code. It supports the following programming languages: C, C++, VC++ (Visual C++ .NET), VB (Visual Basic), VBA, Qbasic (quickbasic), VBScript (VBS), ASP, Visual C# (C sharp), Visual Basic .NET (VB.NET), Visual J# .NET, VC++.NET, ASP.NET, Java, JSP, JavaScript (JScript), Delphi (Object Pascal), PowerBuilder (PowerScript), PHP, Visual FoxPro, PL/SQL, T-SQL (Transact-sql) and Perl.
-Code Visual To Flowchart 41 Crack
DOWNLOAD ★ https://byltly.com/2uKwwm
-Why use Code Visual To Flowchart?
-Code Visual To Flowchart can help you in many ways. Here are some of the reasons why you should use it:
-
-- It can help you understand complex code logic and structure by visualizing it in a clear and intuitive way.
-- It can help you document your code for yourself or others by generating flowcharts that show the logic flow and algorithm of your program.
-- It can help you debug your code by showing you where the errors or bugs are in your code.
-- It can help you learn new programming languages by showing you how the syntax and structure of different languages work.
-- It can help you improve your coding skills by showing you how to write better and more efficient code.
-
-How to download and install Code Visual To Flowchart 41 Crack?
-To download and install Code Visual To Flowchart 41 Crack, you need to follow these steps:
-Code Visual To Flowchart 41 Crack download
-Code Visual To Flowchart 41 Crack free
-Code Visual To Flowchart 41 Crack full version
-Code Visual To Flowchart 41 Crack serial key
-Code Visual To Flowchart 41 Crack license key
-Code Visual To Flowchart 41 Crack activation code
-Code Visual To Flowchart 41 Crack patch
-Code Visual To Flowchart 41 Crack keygen
-Code Visual To Flowchart 41 Crack torrent
-Code Visual To Flowchart 41 Crack online
-Code Visual To Flowchart 41 Crack generator
-Code Visual To Flowchart 41 Crack software
-Code Visual To Flowchart 41 Crack tool
-Code Visual To Flowchart 41 Crack converter
-Code Visual To Flowchart 41 Crack editor
-Code Visual To Flowchart 41 Crack viewer
-Code Visual To Flowchart 41 Crack maker
-Code Visual To Flowchart 41 Crack builder
-Code Visual To Flowchart 41 Crack creator
-Code Visual To Flowchart 41 Crack diagram
-Code Visual To Flowchart 41 Crack chart
-Code Visual To Flowchart 41 Crack graph
-Code Visual To Flowchart 41 Crack design
-Code Visual To Flowchart 41 Crack layout
-Code Visual To Flowchart 41 Crack format
-Code Visual To Flowchart 41 Crack style
-Code Visual To Flowchart 41 Crack template
-Code Visual To Flowchart 41 Crack example
-Code Visual To Flowchart 41 Crack sample
-Code Visual To Flowchart 41 Crack tutorial
-Code Visual To Flowchart 41 Crack guide
-Code Visual To Flowchart 41 Crack manual
-Code Visual To Flowchart 41 Crack instruction
-Code Visual To Flowchart 41 Crack review
-Code Visual To Flowchart 41 Crack rating
-Code Visual To Flowchart 41 Crack feedback
-Code Visual To Flowchart 41 Crack testimonial
-Code Visual To Flowchart 41 Crack comparison
-Code Visual To Flowchart 41 Crack alternative
-Code Visual To Flowchart 41 Crack solution
-Code Visual To Flowchart 41 Crack method
-Code Visual To Flowchart 41 Crack technique
-Code Visual To Flowchart 41 Crack approach
-Code Visual To Flowchart 41 Crack strategy
-Code Visual To Flowchart 41 Crack tip
-Code Visual To Flowchart 41 Crack trick
-Code Visual To Flowchart 41 Crack hack
-Code Visual To Flowchart 41 Crack cheat
-Code Visual To Flowchart 41 Crack mod
-Code Visual To Flowchart 41 Crack update
-
-- Go to this link and click on the green Download button.
-- Save the cvf.exe file on your computer and run it.
-- Follow the instructions on the screen to complete the installation process.
-- Copy the crack file from the downloaded folder and paste it into the installation directory of Code Visual To Flowchart.
-- Run Code Visual To Flowchart as administrator and enjoy!
-
-How to use Code Visual To Flowchart 41 Crack?
-Step 1: Open your code file in Code Visual To Flowchart
-To open your code file in Code Visual To Flowchart, you need to do the following:
-
-- Launch Code Visual To Flowchart from your desktop or start menu.
-- Click on the File menu and select Open Source File.
-- Browse to the location of your code file and select it.
-- Click on Open.
-
-You will see your code displayed in the left pane of the main window. You can also drag and drop your code file into the left pane.
-Step 2: Choose the output format and options
-To choose the output format and options for your flowchart, you need to do the following:
-
-- Click on the Output menu and select Output Format.
-- Select the format that you want for your flowchart. You can choose from Bmp, Visio, Word, Excel, PowerPoint, or HTML.
-- Click on OK.
-- Click on the Output menu again and select Output Options.
-- Select the options that you want for your flowchart. You can choose from different styles, colors, fonts, sizes, shapes, etc.
-- Click on OK.
-
- Step 3: Generate the flowchart and save it
- To generate the flowchart and save it, you need to do the following:
-
- - Click on the Chart menu and select Generate Chart.
- - You will see your flowchart displayed in the right pane of the main window. You can zoom in or out by using the mouse wheel or the toolbar buttons. You can also drag or resize the chart by using the mouse cursor.
- - To save your flowchart as a file, click on the File menu and select Save Chart As.
- - Browse to the location where you want to save your file and enter a name for it.
- - Select the file type that matches your output format. For example, if you chose HTML as your output format, select HTML files as your file type.
- - Click on Save.
-
- You have successfully generated a flowchart from your code using Code Visual To Flowchart 41 Crack!
- Benefits of using Code Visual To Flowchart 41 Crack
- Save time and effort
- By using Code Visual To Flowchart 41 Crack, you can save a lot of time and effort that you would otherwise spend on drawing and editing flowcharts manually. You don't need to worry about aligning or connecting shapes or symbols. You don't need to worry about formatting or styling your chart. You don't need to worry about updating or modifying your chart when your code changes. All you need to do is open your code file in Code Visual To Flowchart 41 Crack and let it do all the work for you!
- Improve code readability and understanding
- By using Code Visual To Flowchart 41 Crack, you can improve your code readability and understanding by visualizing it in a clear and intuitive way. You can see how your code flows from one statement to another. You can see how your code branches into different paths based on conditions or loops. You can see how your code calls different functions or subroutines. You can see how your code handles errors or exceptions. You can see how your code interacts with external resources or inputs/outputs. You can see all these details at a glance by looking at your flowchart!
- Document and debug your code easily
- By using Code Visual To Flowchart 41 Crack, you can document and debug your code easily by generating flowcharts that show the logic flow and algorithm of your program. You can use these flowcharts as documentation for yourself or others who need to understand or maintain your code. You can also use these flowcharts as debugging tools for finding errors or bugs in your code. You can compare your flowcharts with your expected results or specifications. You can trace where your code goes wrong or fails by following the arrows in your flowcharts. You can fix or improve your code accordingly by referring back to your flowcharts!
- Conclusion
- Summary of the main points
- In this article, you learned what Code Visual To Flowchart is, why you should use it, how to download and install it, how to use it, and what benefits you can get from it. You learned that Code Visual To Flowchart is an automatic flow chart generator software that can reverse engineer a program, create programming flowcharts from code, and document source code. You learned that Code Visual To Flowchart can help you save time and effort, improve code readability and understanding, and document and debug your code easily. You learned how to download and install Code Visual To Flowchart 41 Crack, how to open your code file in Code Visual To Flowchart, how to choose the output format and options for your flowchart, how to generate the flowchart and save it, and how to use the flowchart for various purposes.
- Call to action
- If you are a programmer who wants to document your source code or reverse engineer a program, you should definitely try Code Visual To Flowchart 41 Crack. It is a powerful tool that can automatically generate flowcharts from code in various programming languages. It can help you save time and effort, improve code readability and understanding, and document and debug your code easily. You can download Code Visual To Flowchart 41 Crack from this link and follow the steps in this article to use it. Don't miss this opportunity to create professional-looking flowcharts from your code without spending hours on drawing and editing. Download Code Visual To Flowchart 41 Crack today and see the difference for yourself!
- FAQs
- What is a flowchart?
- A flowchart is a graphical representation of the steps or logic of a process or program. It uses different shapes or symbols to represent different types of actions or decisions, and arrows to show the direction or sequence of the flow.
- What are the benefits of using flowcharts?
- Flowcharts can help you in many ways, such as:
-
-- They can help you visualize complex processes or programs in a clear and intuitive way.
-- They can help you communicate your ideas or solutions to others effectively.
-- They can help you identify problems or errors in your processes or programs quickly.
-- They can help you optimize or improve your processes or programs efficiently.
-
- What are the features of Code Visual To Flowchart?
- Code Visual To Flowchart has many features that make it a powerful tool for generating flowcharts from code, such as:
-
-- It can support multiple programming languages, such as C, C++, Java, PHP, etc.
-- It can generate different types of flowcharts, such as Bmp, Visio, Word, Excel, PowerPoint, or HTML.
-- It can customize the style, color, font, size, shape, etc. of your flowcharts.
-- It can sync your code and flowchart automatically when you edit either of them.
-- It can export your flowcharts as files or images for further use.
-
- How to get Code Visual To Flowchart 41 Crack?
- To get Code Visual To Flowchart 41 Crack, you need to download the cvf.exe file from this link, install it on your computer, copy the crack file from the downloaded folder and paste it into the installation directory of Code Visual To Flowchart, run Code Visual To Flowchart as administrator and enjoy!
- Is Code Visual To Flowchart 41 Crack safe to use?
- Code Visual To Flowchart 41 Crack is safe to use as long as you download it from a reliable source and scan it with an antivirus program before running it. However, we do not recommend using cracked software as it may violate the terms and conditions of the original software developer and may cause legal issues. We suggest that you purchase the official version of Code Visual To Flowchart from this link if you want to support the developer and enjoy more features and updates.
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/From Up on Poppy Hill English Dub 1080p The Best Way to Enjoy the Studio Ghibli Classic.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/From Up on Poppy Hill English Dub 1080p The Best Way to Enjoy the Studio Ghibli Classic.md
deleted file mode 100644
index 21c65ea266fc37a5a3c0b94f9bbf19b85d420fdb..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/From Up on Poppy Hill English Dub 1080p The Best Way to Enjoy the Studio Ghibli Classic.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-From Up on Poppy Hill English Dub 1080p: A Review of the Charming Anime Film by Studio Ghibli
- If you are looking for a heartwarming, wholesome, and nostalgic anime film to watch, you might want to check out From Up on Poppy Hill (2011), a historical drama by Studio Ghibli, the acclaimed animation studio behind classics like Spirited Away (2001), Howl's Moving Castle (2004), and My Neighbor Totoro (1988). From Up on Poppy Hill is set in Yokohama in 1963, a year before Japan hosts the Olympics for the first time after World War II. It tells the story of Umi, a high school girl who runs a boarding house by the sea while her mother is abroad, and Shun, a boy who leads a student club that fights to save their old clubhouse from demolition. As they work together to preserve their school's history and culture, they also develop a friendship that blossoms into romance. However, a buried secret from their past threatens to tear them apart. In this review, I will explain why From Up on Poppy Hill is a charming anime film that deserves your attention. I will discuss its historical and cultural context, its characters and relationships, its artistic and technical aspects, and its themes and messages. I will also give you my personal opinion on the film's strengths and weaknesses, as well as a recommendation for who should watch it and where to find it online. The Historical and Cultural Context of From Up on Poppy Hill
- One of the most interesting aspects of From Up on Poppy Hill is its historical and cultural context. The film depicts Japan in a time of transition, when it was recovering from the devastation of war and preparing to host the Olympics as a symbol of its new start. The film shows both the excitement and the anxiety that people felt about the future, as well as the nostalgia for the past. The film also explores the contrast between tradition and modernity, and the importance of preserving cultural heritage. Umi represents tradition, as she follows her father's naval flag ritual every morning, cooks traditional meals for her boarders, and respects her elders. Shun represents modernity, as he rides a motorbike, publishes a school newspaper, and advocates for social change. Their clubhouse, called the Latin Quarter (or Quartier Latin in French), is a place where both traditions and innovations coexist. It is filled with books, antiques, musical instruments, scientific equipment, art works, and other treasures that reflect Japan's rich history and culture. The film also pays homage to French culture and literature, which influenced many Japanese artists in the post-war era. The title of the film is inspired by a French song called "Coquelicot-zaka kara" (From Coquelicot Hill), which Umi sings in one scene. The Latin Quarter is named after a famous district in Paris where many intellectuals gathered. The film also references works by French authors like Antoine de Saint-Exupéry (The Little Prince), Jules Verne (Twenty Thousand Leagues Under the Sea), Victor Hugo (Les Misérables), Alexandre Dumas (The Count of Monte Cristo), Honoré de Balzac (The Human Comedy), Charles Baudelaire (The Flowers of Evil), Jean-Paul Sartre (No Exit), Albert Camus (The Stranger), Jean Cocteau (Beauty and the Beast), Antoine de Saint-Exupéry (The Little Prince), Jules Verne (Twenty Thousand Leagues Under the Sea), Victor Hugo (Les Misérables), Alexandre Dumas (The Count of Monte Cristo), Honoré de Balzac (The Human Comedy), Charles Baudelaire (The Flowers of Evil), Jean-Paul Sartre (No Exit), Albert Camus (The Stranger), Jean Cocteau (Beauty and the Beast), etc. The Characters and Relationships in From Up on Poppy Hill
- Another appealing aspect of From Up on Poppy Hill is its characters and relationships. The film features a large cast of colorful and memorable characters who add humor, drama, and charm to the story. The main protagonists are Umi and Shun, two high school students who are both mature, kind, and responsible beyond their years. Umi is a hardworking and caring girl who lost her father in the Korean War and manages a boarding house while her mother is studying in America. She is also good at cooking, sewing, and gardening. Shun is an adventurous and charismatic boy who was adopted as an infant and does not know his biological parents. He is also passionate about journalism, poetry, and sailing. They meet when Shun sees Umi's flag signal from his clubhouse and writes an article about it. They soon become friends and join forces to save their clubhouse from being torn down by the school administration. As they spend more time together, they also develop romantic feelings for each other. However, they face a major obstacle when they discover that they might be siblings, as they share the same photograph of their fathers who were friends in the navy. This shocking revelation tests their relationship and forces them to confront their pasts. The supporting characters are also well-developed and contribute to the plot and atmosphere of the film. They include: - The members of the Latin Quarter club, a group of eccentric and enthusiastic students who have various hobbies and interests, such as astronomy, philosophy, biology, drama, music, etc. They organize a festival to showcase their clubhouse and appeal to the public opinion. - Umi's family and friends, who live with her in the boarding house or visit her often. They include her grandmother, who runs the house; her younger sister Sora, who attends elementary school; her older sister Riku, who works as a nurse; her aunt Kyoko, who is a photographer; her childhood friend Sachiko, who helps her with chores; and Miki, a boarder who works as a typist. - Other minor characters, such as Tokumaru-sensei, the school principal who wants to demolish the clubhouse; Yoshio Onodera, Shun's adoptive father who runs a shipyard; Akio Kazama, a journalist who helps Shun find out his true identity; Ryoko Matsuzaki, a beautiful student council president who has a crush on Shun; Shiro Mizunuma, a handsome student council vice president who has a crush on Umi; etc. The Artistic and Technical Aspects of From Up on Poppy Hill
- From Up on Poppy Hill is also remarkable for its artistic and technical aspects. The film showcases Studio Ghibli's signature animation style and quality, which combines hand-drawn 2D animation with computer-generated 3D effects. The film uses vivid colors, realistic lighting, and detailed backgrounds to create a stunning visual experience. The film also features beautiful music and sound effects that enhance its mood and emotion. The film's soundtrack was composed by Satoshi Takebe, who blended orchestral music with jazz elements. The film's theme song was performed by Aoi Teshima, who sang both in Japanese and Here is the continuation of the article. in French. The film's sound effects are realistic and immersive, such as the waves of the sea, the wind of the hill, and the noise of the city. The film's voice acting and dubbing are also excellent, featuring a talented cast of actors who bring their characters to life. The English dub features stars like Sarah Bolger, Anton Yelchin, Gillian Anderson, Beau Bridges, Jamie Lee Curtis, Bruce Dern, Christina Hendricks, Ron Howard, and Chris Noth. The film was directed by Goro Miyazaki, the son of Hayao Miyazaki, the legendary founder of Studio Ghibli. This was his second feature film after Tales from Earthsea (2006), which received mixed reviews from critics and fans. From Up on Poppy Hill was a more successful and acclaimed project for him, as he collaborated with his father on the screenplay, which was based on a manga by Chizuru Takahashi and Tetsuro Sayama. The film won several awards, including the Japan Academy Prize for Animation of the Year and the Asia Pacific Screen Award for Best Animated Feature Film. The Themes and Messages of From Up on Poppy Hill
- From Up on Poppy Hill is a film that explores various themes and messages that resonate with audiences of all ages and backgrounds. Some of the main themes and messages are: - The importance of family, community, and identity. The film shows how Umi and Shun struggle with their family histories and identities, as they search for their roots and their place in the world. They also learn to appreciate their adoptive families and communities, who support them and love them unconditionally. They realize that family is not only defined by blood, but also by bonds. - The value of courage, honesty, and resilience. The film shows how Umi and Shun face their challenges and overcome their fears with courage, honesty, and resilience. They do not give up on their dreams and goals, even when they encounter difficulties and setbacks. They also do not lie or hide their feelings, even when they are painful or uncomfortable. They face the truth and accept the consequences. - The appreciation of beauty, simplicity, and nostalgia. The film shows how Umi and Shun find beauty, simplicity, and nostalgia in their everyday lives. They enjoy the simple pleasures of cooking, gardening, sailing, reading, and spending time with each other. They also cherish the memories of their pasts, such as Umi's flag ritual, Shun's poem book, and their fathers' photograph. They appreciate what they have and what they had. Conclusion
- In conclusion, From Up on Poppy Hill is a charming anime film that offers a delightful and touching experience for viewers. It is a film that combines historical and cultural context, characters and relationships, artistic and technical aspects, and themes and messages in a harmonious and engaging way. It is a film that celebrates love, life, and legacy in a changing world. In my opinion, From Up on Poppy Hill is one of the best films by Studio Ghibli. It is not as fantastical or adventurous as some of their other works, but it is more realistic and relatable. It is a film that captures the essence of human emotions and interactions in a simple yet profound way. It is a film that makes me smile, cry, and think. I would recommend this film to anyone who loves anime, history, romance, or drama. I would also recommend this film to anyone who wants to watch a wholesome, heartwarming, and nostalgic story that will make them feel good. You can watch this film online on platforms like Microsoft Store, Animefever, Internet Archive, or Bilibili. You can also buy or rent this film on DVD or Blu-ray. I hope you enjoyed this review and found it helpful. If you have any questions or comments, please feel free to share them with me. Thank you for reading! FAQs
- - Q: What is the meaning of Umi's flag signal? - A: Umi's flag signal is a naval code that means "I pray for safe voyages". She does it every morning to honor her father who died in the Korean War. - Q: What is the significance of Shun's poem book? - A: Shun's poem book is a collection of poems by Kenji Miyazawa, a Japanese poet and author who wrote about nature and social issues. Shun inherited it from his biological father who was also a poet and a sailor. - Q: What is the origin of the Latin Quarter club? - A: The Latin Quarter club was founded by Umi and Shun's fathers when they were students at the same school. They named it after the Parisian district where they met and became friends. - Q: What is the outcome of the clubhouse protest? - A: The clubhouse protest succeeds in convincing the school administration and the public opinion to spare the clubhouse from demolition. The students are allowed to keep their club activities and their cultural heritage. - Q: What is the resolution of Umi and Shun's relationship? - A: Umi and Shun find out that they are not siblings, but cousins. They are relieved and happy to learn that they can be together without any guilt or shame. They confess their love for each other and kiss under Umi's flag signal.
-From Up on Poppy Hill English Dub 1080p
Download ……… https://byltly.com/2uKAb5
0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Basta Guardare Il Cielo Film Completo Ita Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/Basta Guardare Il Cielo Film Completo Ita Download.md
deleted file mode 100644
index 93edee779d9055215b3a328976a0ad023f52080a..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Basta Guardare Il Cielo Film Completo Ita Download.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Why complain that Zucconi does not step into the political debate? Elsewhere he talks up a storm. And he lands blows, too.
Yesterday, 14 hours ago: "It would have been enough to watch the live TV broadcast to witness the suicide of the Senate of the Republic. Renzi is trying to kill a dead man."
Which ties back to the "everyone's a thief, everyone's dead" refrain. Here and there Grillo has told some truths too. People picked up on them, and that explains his success (after all, "what is real is rational and what is rational is real", remember?).
-Basta Guardare Il Cielo Film Completo Ita Download
Download Zip ❤ https://imgfil.com/2uy07H
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Criminal Case Paris - The Hidden Object Game That Takes You to the Heart of Romance.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Criminal Case Paris - The Hidden Object Game That Takes You to the Heart of Romance.md
deleted file mode 100644
index a190ef93379798faef6f41267ccc601c5d5e2986..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Criminal Case Paris - The Hidden Object Game That Takes You to the Heart of Romance.md
+++ /dev/null
@@ -1,113 +0,0 @@
-
-Criminal Case: City of Romance - A Review
-If you are a fan of hidden object, adventure games, you might want to check out Criminal Case: City of Romance. This is the eighth installment of the popular Criminal Case series, which has been downloaded by millions of players around the world. In this game, you join the Parisian Police Squad to solve a series of murder cases in the city of love. You will investigate crime scenes, examine clues, interrogate suspects, and catch killers, while exploring different districts and themes based on romance. You can also play with your friends and compete for the best detective title.
-In this article, I will give you a brief overview of the game, its gameplay, graphics, sound, pros and cons, and my final verdict. I will also share some tips and tricks to help you enjoy the game more. So, let's get started!
-criminal case city of romance download
Download File ➡ https://urlin.us/2uSVPC
- Introduction
-Criminal Case: City of Romance is a free-to-play game that was released in 2020 by Pretty Simple, a French game developer. It is available for iOS and Android devices, as well as on Facebook. The game is divided into six districts across Paris, each with its own romantic theme: Fantasy, Attraction, Obsession, Jealousy, Separation, and Engagement. There are 17 cases in total, each with a unique storyline and characters.
-To play the game, you need to download it from the App Store or Google Play, or access it from Facebook. You will need an internet connection to play the game. You will also need energy to investigate crime scenes, which can be replenished by waiting, watching ads, buying with real money, or asking your friends for help. You can also earn coins, stars, cash, and other rewards by playing the game.
- Gameplay
-The gameplay of Criminal Case: City of Romance is similar to other games in the Criminal Case series. You will be assigned a case by your chief inspector Hugo Mercier, who will guide you throughout the investigation. You will then go to various crime scenes to look for clues. You will need to find all the items on a list within a time limit. The faster you find them, the higher your score will be. You can also use hints or boosters to help you find items faster.
-Once you have collected enough clues from a crime scene, you will be able to analyze them in the laboratory with your forensic expert Enzo Jonas. You will need to perform various mini-games such as matching fingerprints, assembling puzzles, identifying substances, etc. Some analyses may take time to complete, but you can speed them up by using cash or watching ads.
-After analyzing clues, you will be able to interrogate witnesses and suspects with your partner Riya Laghari. You will need to ask them questions based on the evidence you have found. Some suspects may lie or hide information from you, so you will need to pay attention to their
expressions and body language. You can also use stars to arrest suspects or confirm their alibis. You will need to arrest the right killer at the end of each case, based on the evidence and motive you have gathered.
-Besides solving cases, you can also explore different districts and themes in the game. You will encounter various romantic scenarios, such as a fairy tale wedding, a masquerade ball, a love triangle, a secret admirer, etc. You will also meet different characters, such as a prince charming, a femme fatale, a celebrity chef, a fashion designer, etc. You can interact with them and learn more about their stories and personalities.
-To make the game more fun and challenging, you can also follow some tips and tricks. For example, you can play the game every day to get daily bonuses and rewards. You can also replay crime scenes to earn more stars and coins. You can also join a team or create your own to chat with other players and exchange gifts. You can also participate in special events and tournaments to win exclusive items and prizes.
- Graphics and Sound
-The graphics and sound of Criminal Case: City of Romance are impressive and immersive. The game has a colorful and detailed visual design that captures the beauty and charm of Paris. The crime scenes are well-designed and realistic, with various objects and backgrounds that fit the theme of each district. The characters are also well-drawn and animated, with different expressions and outfits that reflect their moods and roles.
-criminal case paris game download
-how to play criminal case city of romance
-criminal case city of romance app store
-criminal case city of romance google play
-criminal case city of romance facebook
-criminal case city of romance cheats and tips
-criminal case city of romance walkthrough and guide
-criminal case city of romance hidden object game
-criminal case city of romance adventure game
-criminal case city of romance murder mystery game
-criminal case city of romance pretty simple
-criminal case city of romance review and rating
-criminal case city of romance latest update
-criminal case city of romance free download for pc
-criminal case city of romance free download for android
-criminal case city of romance free download for ios
-criminal case city of romance apk download
-criminal case city of romance mod apk download
-criminal case city of romance hack download
-criminal case city of romance unlimited energy download
-criminal case city of romance offline download
-criminal case city of romance online download
-criminal case city of romance full version download
-criminal case city of romance no ads download
-criminal case city of romance premium download
-criminal case city of romance best scenes
-criminal case city of romance best characters
-criminal case city of romance best cases
-criminal case city of romance best outfits
-criminal case city of romance best pets
-criminal case city of romance best weapons
-criminal case city of romance best clues
-criminal case city of romance best suspects
-criminal case city of romance best killers
-criminal case city of romance best endings
-criminal case city of romance how to get stars
-criminal case city of romance how to get coins
-criminal case city of romance how to get cash
-criminal case city of romance how to get burgers
-criminal case city of romance how to get lucky cards
-criminal case city of romance how to get trophies
-criminal case city of romance how to get medals
-criminal case city of romance how to get achievements
-criminal case city of romance how to get hints
-criminal case city of romance how to get magnifying glasses
-criminal case city of romance how to get flashlights
-criminal case city of romance how to level up fast
-criminal case city of romance how to solve puzzles fast
-criminal case city of romance how to find objects fast
-The game also has a catchy and atmospheric music and sound effects that enhance the mood and tone of the game. The music is varied and fitting for each district and theme, ranging from classical to jazz to pop. The sound effects are also realistic and engaging, such as the sound of clicking items, analyzing clues, interrogating suspects, etc. The game also has voice-overs for some characters, such as your chief inspector, your partner, and your forensic expert.
- Pros and Cons
-Like any other game, Criminal Case: City of Romance has its pros and cons. Here are some of them:
-
-
-Pros |
-Cons |
-
-
-- Engaging and addictive gameplay that keeps you hooked on solving cases |
-- Limited energy that may prevent you from playing for long periods |
-
-
-- Interesting and diverse storylines and characters that make you curious about their secrets |
-- Repetitive and predictable patterns that may make some cases easy or boring |
-
-
-- Stunning and realistic graphics that make you feel like you are in Paris |
-- High storage space and data usage that may slow down your device or connection |
-
-
-- Fun and lively music and sound effects that create a great atmosphere for the game |
-- No option to mute or adjust the volume of the music or sound effects |
-
-
-- Social and competitive features that allow you to play with your friends and other players |
-- In-app purchases that may give some players an unfair advantage or pressure you to spend money |
-
-
- Overall, I think the pros outweigh the cons, and I would recommend this game to anyone who likes hidden object, adventure games.
- Conclusion
-In conclusion, Criminal Case: City of Romance is a fun and exciting game that lets you experience the thrill of solving murder cases in the city of love. You will enjoy the gameplay, graphics, sound, storylines, characters, themes, and features of this game. You will also learn some facts about Parisian culture and history along the way. If you are looking for a game that combines mystery, romance, adventure, and challenge, this is the game for you.
-I hope you found this article helpful and informative. If you have any questions or feedback about the game or this article, please feel free to contact me at [email](^i^). I would love to hear from you. Thank you for reading!
- Frequently Asked Questions (FAQs)
- Q: How can I get more energy in Criminal Case: City of Romance?
-A: There are several ways to get more energy in the game. You can wait for it to regenerate over time (1 point every 3 minutes), watch ads (20 points per ad), buy it with real money (various packages available), ask your friends for help (1 point per friend), or get it from your team members (5 points per member).
- Q: How can I get more stars in Criminal Case: City of Romance?
-A: Stars are used to perform
various actions in the game, such as arresting suspects, confirming alibis, unlocking new districts, etc. You can earn stars by playing crime scenes and getting high scores. You can also replay crime scenes to earn more stars. You can also buy stars with real money (various packages available).
- Q: How can I get more coins in Criminal Case: City of Romance?
-A: Coins are used to buy items and outfits in the game, as well as hints and boosters. You can earn coins by playing crime scenes and getting high scores. You can also get coins from your friends, team members, or daily bonuses. You can also watch ads to get coins (100 coins per ad). You can also buy coins with real money (various packages available).
- Q: How can I get more cash in Criminal Case: City of Romance?
-A: Cash is the premium currency in the game, which can be used to speed up analyses, unlock new districts, buy exclusive items and outfits, etc. You can earn cash by leveling up, completing achievements, or participating in special events and tournaments. You can also buy cash with real money (various packages available).
- Q: How can I get more items and outfits in Criminal Case: City of Romance?
-A: Items and outfits are used to customize your avatar and improve your skills in the game. You can get items and outfits by buying them with coins or cash in the shop, or by winning them from special events and tournaments. You can also get items and outfits from your friends, team members, or daily bonuses.
- Q: How can I join or create a team in Criminal Case: City of Romance?
-A: A team is a group of players who can chat, exchange gifts, and help each other in the game. You can join or create a team by tapping on the team icon on the bottom right corner of the screen. You will need to be at least level 10 to join or create a team. You can search for an existing team by name or ID, or browse through the list of recommended teams. You can also create your own team by choosing a name, an ID, a description, a badge, and a language. You can invite your friends to join your team, or accept requests from other players who want to join.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download File dari Google Drive Tanpa Buka Halaman Web.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download File dari Google Drive Tanpa Buka Halaman Web.md
deleted file mode 100644
index ed9b5aba2f7d5e261ec3a6958bbe8fe1a546e13b..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download File dari Google Drive Tanpa Buka Halaman Web.md
+++ /dev/null
@@ -1,163 +0,0 @@
-
-How to Download Files from Google Drive
-Google Drive is a cloud-based storage and syncing service from Google. It is designed to give you free 15 GB of storage for all your files, including photos, documents, music, and so much more. You can access your files from any device, as long as you have an internet connection.
-download dari link google drive
Download »»» https://urlin.us/2uSThK
-But what if you want to download your files from Google Drive to your computer or mobile device? Maybe you want to back up your data, work offline, or share your files with others. In this article, we will show you how to download files from Google Drive easily and quickly.
-Before we get started, let's go over some of the benefits and features of using Google Drive, as well as how to create a Google account and access Google Drive.
- Introduction
-What is Google Drive and Why Use It
-Google Drive is more than just a place to store your files. It is also a platform that allows you to create, share, and collaborate on various types of content. Here are some of the reasons why you might want to use Google Drive:
-
-- You can store any type of file in Google Drive, such as PDFs, images, videos, audio, etc.
-- You can access your files from any device, whether it's a computer, tablet, or smartphone.
-- You can organize your files into folders and subfolders, and search for them easily.
-- You can share your files with anyone, either by inviting them by email or by creating a link that anyone can use.
-- You can control how others can use your files, such as whether they can view, comment, or edit them.
-- You can use Google Docs, Sheets, and Slides, cloud-based apps that let you create and edit documents, spreadsheets, and presentations online. You can also share and comment on these files with others in real time.
-- You can use Drive for desktop, a tool that lets you sync your files between your computer and Google Drive.
-- You can use the Google Drive app on your mobile device to access your files, scan documents, and upload photos.
-- You can use Google Photos, a service that lets you store and manage your photos and videos in Google Drive.
-- You can use Google Backup and Sync, a tool that lets you back up your files and folders from your computer to Google Drive.
-
-As you can see, Google Drive offers a lot of benefits and features that can help you store, manage, and work with your files. But how do you get started with Google Drive?
- How to Create a Google Account and Access Google Drive
-To use Google Drive, you need to have a Google account. A Google account is a free account that gives you access to various Google services, such as Gmail, YouTube, Maps, Calendar, etc. If you already have a Google account, you can skip this step. If not, here's how to create one:
-
-- Go to https://accounts.google.com/signup and fill out the form with your name, email address, password, and other details.
-- Verify your email address by clicking on the link that Google sends you.
-- Agree to the terms and conditions and privacy policy of Google.
-- Congratulations! You have created your Google account. You can now access Google Drive and other Google services.
-
-To access Google Drive, you have two options:
-cara membuat link download dari google drive
-cara download file di google drive yang dikunci
-cara download file di google drive yang terkunci view only
-cara download file di google drive yang tidak bisa didownload
-cara download file di google drive yang hanya bisa dilihat
-cara download file di google drive yang dibatasi
-cara download file di google drive yang tidak ada tombol downloadnya
-cara download file di google drive yang hanya bisa memberi komentar
-cara membuat link download langsung dari google drive
-cara membuat link download otomatis dari google drive
-cara membuat link download direct dari google drive
-cara membuat link download tanpa masuk ke google drive
-cara membuat link download tanpa membuka google drive
-cara membuat link download tanpa login ke google drive
-cara membuat link download tanpa verifikasi google drive
-cara mendownload file dari google drive dengan cepat
-cara mendownload file dari google drive dengan mudah
-cara mendownload file dari google drive dengan idm
-cara mendownload file dari google drive dengan gdirect
-cara mendownload file dari google drive dengan uc browser
-tips dan trik download file di google drive
-solusi download file di google drive yang bermasalah
-alasan download file di google drive gagal atau error
-langkah-langkah download file di google drive yang benar
-syarat dan ketentuan download file di google drive yang harus dipatuhi
-keuntungan dan kerugian download file di google drive
-perbedaan dan persamaan download file di google drive dan dropbox
-pengertian dan fungsi download file di google drive
-tutorial dan panduan download file di google drive untuk pemula
-contoh dan aplikasi download file di google drive untuk berbagai keperluan
-
-- You can go to https://drive.google.com and sign in with your Google account.
-- You can download the Google Drive app on your computer or mobile device and sign in with your Google account.
-
-Either way, you will see the main interface of Google Drive, where you can view, upload, create, and share your files.
- How to Share Files on Google Drive
-How to Share Files with Specific People or Groups
-One of the main features of Google Drive is the ability to share your files with others. You can share your files with specific people or groups by inviting them by email. Here's how:
-
-- Select the file or folder that you want to share on Google Drive.
-- Click on the Share button on the top right corner of the screen.
-- A pop-up window will appear where you can enter the email addresses of the people or groups that you want to share with. You can also add a note or message if you want.
-- Choose the level of access that you want to give them. You can choose between Viewer, Commenter, or Editor. A Viewer can only view the file, a Commenter can view and comment on the file, and an Editor can view, comment, and edit the file.
-- Click on Send or Done. The people or groups that you invited will receive an email notification with a link to access the file.
-
- How to Share Files Publicly or With Anyone Who Has the Link
-If you want to share your files with anyone who has the link, without requiring them to sign in with a Google account, you can do so by creating a public link. Here's how:
-
-- Select the file or folder that you want to share on Google Drive.
-- Click on the Share button on the top right corner of the screen.
-- A pop-up window will appear where you can click on Change next to Anyone with the link.
-- A drop-down menu will appear where you can choose who can access the link. You can choose between Anyone on the internet or Anyone at [your organization]. You can also choose the level of access that they have: Viewer, Commenter, or Editor.
-- Click on Done. You will see a link that you can copy and paste anywhere you want to share it. Anyone who has the link will be able to access the file without signing in.
-
- How to Change or Revoke Sharing Permissions
-If you want to change or revoke the sharing permissions for your files, you can do so at any time. Here's how:
-
-- Select the file or folder that you want to change or revoke permissions for on Google Drive.
-- Click on the Share button on the top right corner of the screen.
-- A pop-up window will appear where you can see the list of people or groups that you have shared the file with. You can also see the level of access that they have.
-- To change the level of access, click on the drop-down menu next to their name and choose a different option: Viewer, Commenter, or Editor.
-- To revoke the sharing permission, click on the Remove button next to their name. They will no longer be able to access the file.
-- To revoke the public link, click on Change next to Anyone with the link and choose Restricted. Only the people that you have invited by email will be able to access the file.
-- Click on Done to save your changes.
-
- How to Download Files from Google Drive
-How to Download a Single File or Folder
-If you want to download a single file or folder from Google Drive to your computer or mobile device, you can do so by following these steps (a scripted alternative is sketched right after this list):
-
-- Select the file or folder that you want to download on Google Drive.
-- Click on the More actions button (three vertical dots) on the top right corner of the screen.
-- Click on Download. The file or folder will start downloading to your device.
-- You can also right-click on the file or folder and choose Download from the menu.
-
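As a scripted alternative to the manual steps above, here is a minimal Python sketch (an illustration added for this article, not an official Google tool) that fetches a publicly shared file through Drive's direct-download endpoint. FILE_ID and the output filename are placeholders, the file must be shared with "Anyone with the link", and very large files may additionally require a confirmation step that this sketch does not handle:

import requests

FILE_ID = "your-file-id-here"  # placeholder: the long ID from the sharing link

# export=download asks Drive to serve the raw file instead of its preview page
response = requests.get(
    "https://drive.google.com/uc",
    params={"export": "download", "id": FILE_ID},
    stream=True,
)
response.raise_for_status()

with open("downloaded_file", "wb") as f:
    for chunk in response.iter_content(chunk_size=1 << 20):
        f.write(chunk)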
- How to Download Multiple Files or Folders
-If you want to download multiple files or folders from Google Drive to your computer or mobile device, you can do so by following these steps:
-
-- Select the files or folders that you want to download on Google Drive. You can use the Shift or Ctrl keys to select multiple items.
-- Right-click on any of the selected items and choose Download from the menu. The files or folders will be compressed into a ZIP file and start downloading to your device.
-- You can also click on the More actions button (three vertical dots) on the top right corner of the screen and choose Download from there.
-
- How to Download Files that are Locked or Restricted
-Sometimes, you might encounter files that are locked or restricted by their owners. This means that you cannot view, comment, or edit them without their permission. However, you might still be able to download them if they have enabled that option. Here's how:
-
-- Select the file that you want to download on Google Drive.
-- Click on the Request access button on the top right corner of the screen.
-- A pop-up window will appear where you can enter your email address and a message to request access from the owner of the file.
-- Click on Send request. The owner of the file will receive an email notification with your request.
-- If they approve your request, you will receive an email notification with a link to access and download the file.
-
- How to Troubleshoot Google Drive Issues
-How to Fix Common Issues with Google Drive
-Sometimes, you might face some issues with Google Drive, such as slow loading, syncing errors, missing files, etc. Here are some of the common issues and how to fix them:
-
-Issue | Solution |
-Google Drive is not loading or is slow | Check your internet connection and make sure it is stable and fast. Try reloading the page or restarting your browser. Clear your browser's cache and cookies. Disable any extensions or plugins that might interfere with Google Drive. Update your browser to the latest version. |
-Google Drive is not syncing or is showing errors | Check your internet connection and make sure it is stable and fast. Try pausing and resuming the sync process. Check your storage space and make sure you have enough room for your files. Check your firewall and antivirus settings and make sure they are not blocking Google Drive. Update your Google Drive app to the latest version. |
-Google Drive files are missing or deleted | Check your trash folder and see if your files are there. You can restore them by right-clicking and choosing Restore from trash. Check your activity panel and see if someone else has moved or deleted your files. You can undo their actions by clicking on Undo changes. Contact Google support and request a file recovery within 25 days of deletion. |
-
- How to Fix Error Messages and Corrupted Files
-Sometimes, you might encounter error messages or corrupted files when trying to download files from Google Drive. Here are some of the common error messages and corrupted files and how to fix them:
-
-| Error Message | Solution |
-| --- | --- |
-| Sorry, you can't view or download this file at this time. | This error message usually means that the file has exceeded its download limit. You can try again later or make a copy of the file in your own Google Drive and download it from there. To make a copy, open the file link, sign in with your Google account, click on the More actions button (three vertical dots), and choose Make a copy. |
-| Unable to access document. Please try again later. | This error message usually means that there is a temporary issue with Google Drive or the file itself. You can try reloading the page or restarting your browser. You can also check the Google Workspace Status Dashboard to see if there are any service disruptions or outages. |
-| The file is corrupt and cannot be opened. | This error message usually means that the file has been damaged or corrupted during the download process. You can try downloading the file again or using a different browser. You can also try using a file repair tool or software to fix the corrupted file. |
-
- How to Send Feedback and Report Bugs to Google
-If you encounter any issues or bugs with Google Drive that are not covered by the solutions above, you can send feedback and report them to Google. This will help Google improve their service and fix any problems. Here's how:
-
-- Click on the Help button (question mark icon) in the top-right corner of the screen.
-- Click on Send feedback.
-- A pop-up window will appear where you can describe your issue or bug in detail. You can also include a screenshot if you want.
-- Click on Send. Google will receive your feedback and work on resolving your issue or bug.
-
- Conclusion
-In this article, we have shown you how to download files from Google Drive easily and quickly. We have also covered some of the benefits and features of using Google Drive, as well as how to share, troubleshoot, and send feedback on Google Drive.
-Google Drive is a powerful and convenient tool that can help you store, manage, and work with your files online. Whether you want to back up your data, work offline, or share your files with others, Google Drive can help you do it all.
-We hope you found this article helpful and informative. If you have any questions or comments, feel free to leave them below. We would love to hear from you!
- FAQs
-Q: How much storage space do I get with Google Drive?
-A: You get 15 GB of free storage space with a Google account. You can upgrade to more storage with a Google One subscription.
- Q: How can I access my files offline on Google Drive?
-A: You can turn on offline access for your files on Google Drive. This will allow you to view and edit your files without an internet connection. Learn how to turn on offline access here.
- Q: How can I sync my files across different devices with Google Drive?
-A: You can use Drive for desktop, a tool that lets you sync your files between your computer and Google Drive. You can also use the Google Drive app on your mobile device to access your files. Learn how to sync your files here.
- Q: How can I create and collaborate on documents, spreadsheets, and presentations with Google Drive?
-A: You can use Google Docs, Sheets, and Slides, cloud-based apps that let you create and edit documents, spreadsheets, and presentations online. You can also share and comment on these files with others in real time. Learn how to use these apps here.
- Q: How can I make a direct download link for my Google Drive files?
-A: You can make a direct download link for your files by using a custom link that contains your file ID. This will bypass the web page that opens when you click a Google Drive file link and start the file download immediately. Learn how to make a direct download link here.
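To make the last answer concrete, here is a minimal Python sketch of turning an ordinary sharing link into a direct-download link. The `uc?export=download` endpoint reflects the commonly used URL pattern rather than a documented guarantee, and `FILE_ID` is a placeholder.

```python
# Minimal sketch: build a direct-download URL from a Drive sharing link.
# Assumes the widely used uc?export=download pattern; FILE_ID is a placeholder.
share_link = "https://drive.google.com/file/d/FILE_ID/view?usp=sharing"
file_id = share_link.split("/d/")[1].split("/")[0]
direct_link = f"https://drive.google.com/uc?export=download&id={file_id}"
print(direct_link)
```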
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Archero Mod APK iOS Everything You Need to Know About the Game and the Hack.md b/spaces/1phancelerku/anime-remove-background/Archero Mod APK iOS Everything You Need to Know About the Game and the Hack.md
deleted file mode 100644
index e4fba5e32c3c14e674fdea1bc77a0ec7253acf5a..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Archero Mod APK iOS Everything You Need to Know About the Game and the Hack.md
+++ /dev/null
@@ -1,133 +0,0 @@
-
-Archero Mod Apk Ios: A Guide to the Ultimate Action Game
-If you are looking for a fun and challenging action game that will test your skills and reflexes, you should try Archero. This is a game where you play as a lone archer who has to fight against waves of enemies and obstacles in different worlds. You can also customize your hero with various skills and equipment to make him more powerful and survive longer.
-archero mod apk ios
Download Zip ⚹⚹⚹ https://jinyurl.com/2uNLtJ
-However, if you want to enjoy the game without any limitations or restrictions, you may want to use Archero mod apk ios. This is a modified version of the game that allows you to access all the features and content for free. You can also get unlimited coins, gems, energy, and other resources to upgrade your hero and unlock new items.
-In this article, we will explain what Archero is, how to download and install Archero mod apk ios, why you should use it, and some tips and tricks for playing the game. Let's get started!
- What is Archero?
-Archero is a popular action game developed by Habby, a Chinese studio that specializes in casual games. The game was released in 2019 for Android and iOS devices, and has since gained millions of fans around the world. The game has also received positive reviews from critics and users alike, who praised its addictive gameplay, beautiful graphics, and variety of content.
- Gameplay and features
-The gameplay of Archero is simple but challenging. You control your hero with a virtual joystick on the screen, and he will automatically shoot arrows at the nearest enemy when you stop moving. Your goal is to clear each level by defeating all the enemies and avoiding their attacks. You will also encounter different obstacles, such as spikes, traps, walls, and portals, that will make your journey more difficult.
-archero mod apk ios download
-archero mod apk ios no jailbreak
-archero mod apk ios unlimited gems
-archero mod apk ios latest version
-archero mod apk ios free
-archero mod apk ios 2023
-archero mod apk ios reddit
-archero mod apk ios online
-archero mod apk ios hack
-archero mod apk ios install
-archero mod apk ios update
-archero mod apk ios offline
-archero mod apk ios cheats
-archero mod apk ios gameplay
-archero mod apk ios review
-archero mod apk ios 4.14.0
-archero mod apk ios god mode
-archero mod apk ios mega
-archero mod apk ios mediafire
-archero mod apk ios vip
-archero mod apk ios obb
-archero mod apk ios tutorial
-archero mod apk ios link
-archero mod apk ios file
-archero mod apk ios generator
-archero mod apk ios coins
-archero mod apk ios weapons
-archero mod apk ios armor
-archero mod apk ios skills
-archero mod apk ios levels
-archero mod apk ios characters
-archero mod apk ios chapters
-archero mod apk ios bosses
-archero mod apk ios enemies
-archero mod apk ios items
-archero mod apk ios pets
-archero mod apk ios codes
-archero mod apk ios features
-archero mod apk ios benefits
-archero mod apk ios tips
-archero mod apk ios tricks
-archero mod apk ios guide
-archero mod apk ios support
-archero mod apk ios forum
-archero mod apk ios community
-archero mod apk ios feedback
-archero mod apk ios rating
-archero mod apk ios comparison
-As you progress through the game, you will face stronger enemies and bosses that will require more strategy and skill to defeat. You will also be able to level up your hero and choose from various skills that will enhance his abilities. For example, you can increase his attack speed, damage, range, or critical rate. You can also get skills that will give him extra arrows, elemental effects, shields, or pets.
-The game features hundreds of levels across different worlds, each with its own theme and design. You can explore forests, deserts, dungeons, temples, caves, and more. The game also offers daily challenges and events that will reward you with coins, gems, energy, scrolls, chests, and other items. You can use these items to upgrade your equipment or buy new ones from the shop.
- How to download and install Archero mod apk ios
-If you want to play Archero on your iOS device with all the benefits of the modded version, you will need to follow these steps:
-
-- Download the Archero mod apk ios file from a reliable source. You can use this link as an example.
-- Install a third-party app installer on your device, such as TutuApp or AppValley. These are apps that allow you to install modified or hacked apps on your iOS device without jailbreaking it.
-- Open the app installer and search for Archero mod apk ios. You should see it in the list of available apps.
-- Tap on the install button and wait for the installation process to finish.
-- Once installed, you may need to trust the app developer in your device settings. To do this, go to Settings > General > Profiles & Device Management > [name of developer] > Trust.
-- Now you can launch Archero mod apk ios from your home screen and enjoy the game with all the mod features.
-
- Why use Archero mod apk ios?
-You may be wondering why you should use Archero mod apk ios instead of the original version of the game. Well, there are several reasons why you may want to do so. Here are some of them:
- Benefits of using Archero mod apk ios
-
-- You can get unlimited coins, gems, energy, and other resources that you can use to upgrade your hero and equipment. This will make your gameplay easier and more enjoyable.
-- You can access all the features and content of the game for free, without having to spend any real money or watch any ads. You can also unlock all the worlds, levels, skills, and items that are otherwise locked or require premium currency.
-- You can customize your hero with any skills and equipment that you want, without having to rely on random drops or choices. You can also change your skills and equipment anytime during the game, without losing any progress or resources.
-- You can enjoy the game without any bugs, glitches, or errors that may affect your performance or experience. The mod apk is updated regularly to ensure its compatibility and functionality with the latest version of the game.
-
- Risks and precautions of using Archero mod apk ios
-However, using Archero mod apk ios also comes with some risks and precautions that you should be aware of. Here are some of them:
-
-- You may violate the terms and conditions of the game developer and publisher, which may result in your account being banned or suspended. You may also lose your progress and data if this happens.
-- You may expose your device to malware or viruses that may harm your system or steal your personal information. You should always download the mod apk from a trusted source and scan it with an antivirus before installing it.
-- You may encounter some compatibility or stability issues with the mod apk, especially if you have an older device or a different version of iOS. You should always check the requirements and specifications of the mod apk before downloading it.
-- You may lose some of the fun and challenge of the game by using the mod apk, as it may make the game too easy or boring for you. You should always use the mod apk responsibly and moderately, and not abuse its features or advantages.
-
- Tips and tricks for playing Archero
-Now that you know how to download and install Archero mod apk ios, and why you should use it, you may want to learn some tips and tricks for playing the game. Here are some of them:
- How to dodge enemy attacks
-One of the most important skills that you need to master in Archero is dodging enemy attacks. This will help you avoid taking damage and losing health, which will affect your survival and performance. Here are some tips on how to dodge enemy attacks:
-
-- Always keep moving and don't stay in one spot for too long. This will make you harder to hit by enemies and give you more opportunities to attack them.
-- Learn the patterns and behaviors of different enemies and bosses. This will help you anticipate their movements and attacks, and react accordingly.
-- Use obstacles and walls to your advantage. You can hide behind them or use them to block enemy projectiles. However, be careful not to trap yourself or get cornered by enemies.
-- Use skills that will help you dodge enemy attacks, such as invincibility, dash, teleport, slow time, or freeze. These skills will give you a temporary edge over your enemies and allow you to escape from dangerous situations.
-
- How to summon and deal with the devil
-Another interesting feature of Archero is the devil, who is a mysterious character that will appear after you defeat a boss. The devil will offer you a deal: he will give you a powerful skill in exchange for some of your health. You can either accept or decline his offer. Here are some tips on how to summon and deal with the devil:
-
-- To summon the devil, you need to defeat a boss without taking any damage. This means that you need to dodge all the boss's attacks and not get hit by any enemies or obstacles along the way.
-- The skill that the devil offers you is random, but it is usually one of the best skills in the game. Some examples are multishot, ricochet, diagonal arrows, extra life, or death nova.
-- The amount of health that the devil takes from you is also random, but it is usually around 20% to 40% of your maximum health. This means that you need to weigh the pros and cons of accepting his offer. If you have a lot of health or a healing skill, you may be able to afford the trade. However, if you have low health or no healing skill, you may want to decline his offer.
-- The devil's offer is optional, and you can always choose to skip it and get a normal skill instead. However, if you skip the devil's offer, you will not be able to summon him again in the same run.
-
- How to choose the best skills and equipment
-Another crucial aspect of Archero is choosing the best skills and equipment for your hero. This will affect your damage, defense, speed, and overall performance in the game. Here are some tips on how to choose the best skills and equipment:
-
-- Choose skills that complement your playstyle and preferences. For example, if you like to move around a lot, you may want to choose skills that increase your movement speed, dash distance, or invincibility duration. If you like to stay in one spot and shoot from afar, you may want to choose skills that increase your attack range, damage, or critical rate.
-- Choose skills that synergize with each other and create powerful combinations. For example, multishot and ricochet are two skills that work well together, as they allow you to shoot multiple arrows that bounce off enemies and walls. Another example is diagonal arrows and piercing shot, which allow you to shoot arrows in four directions that go through enemies.
-- Choose equipment that suits your hero and skills. For example, if you have a lot of skills that increase your arrow count or elemental effects, you may want to use a bow that has a high attack speed or damage. If you have a lot of skills that increase your defense or health, you may want to use an armor that has a high health or resistance.
-- Upgrade your equipment regularly with scrolls and coins. This will improve their stats and make them more effective. You can also fuse equipment of the same type and rarity to create a higher rarity equipment with better stats.
-
- Conclusion
-Archero is a fun and challenging action game that will keep you entertained for hours. You can play as a lone archer who has to fight against hordes of enemies and obstacles in different worlds. You can also customize your hero with various skills and equipment to make him more powerful and survive longer.
-If you want to enjoy the game without any limitations or restrictions, you can use Archero mod apk ios. This is a modified version of the game that allows you to access all the features and content for free. You can also get unlimited coins, gems, energy, and other resources to upgrade your hero and unlock new items.
-However, using Archero mod apk ios also comes with some risks and precautions that you should be aware of. You may violate the terms and conditions of the game developer and publisher, which may result in your account being banned or suspended. You may also expose your device to malware or viruses that may harm your system or steal your personal information. You may also lose some of the fun and challenge of the game by using the mod apk, as it may make the game too easy or boring for you.
-Therefore, you should always use Archero mod apk ios responsibly and moderately, and not abuse its features or advantages. You should also download the mod apk from a trusted source and scan it with an antivirus before installing it. You should also check the requirements and specifications of the mod apk before downloading it.
-We hope this article has helped you understand what Archero is, how to download and install Archero mod apk ios, why you should use it, and some tips and tricks for playing the game. Have fun playing Archero!
- FAQs
-Here are some frequently asked questions about Archero mod apk ios:
- Q: Is Archero mod apk ios safe to use?
-A: Archero mod apk ios is safe to use as long as you download it from a reliable source and scan it with an antivirus before installing it. However, you should always be careful when using modified or hacked apps on your device, as they may contain malware or viruses that may harm your system or steal your personal information.
- Q: Is Archero mod apk ios legal to use?
-A: Archero mod apk ios is not legal to use, as it violates the terms and conditions of the game developer and publisher. Using Archero mod apk ios may result in your account being banned or suspended by the game authorities. You may also face legal consequences if you are caught using Archero mod apk ios.
- Q: How do I update Archero mod apk ios?
-A: To update Archero mod apk ios, you need to download the latest version of the mod apk from the same source that you downloaded it from before. You can also check for updates from the app installer that you used to install the mod apk. You should always update the mod apk to ensure its compatibility and functionality with the latest version of the game.
- Q: How do I uninstall Archero mod apk ios?
-A: To uninstall Archero mod apk ios, you need to delete the app from your device. You can do this by long-pressing the app icon and tapping on the delete option. You can also delete the app from your device settings by going to Settings > General > iPhone Storage > Archero > Delete App.
- Q: Can I play Archero mod apk ios online with other players?
-A: Archero mod apk ios does not support online multiplayer mode, as it may cause conflicts or errors with the game servers. You can only play Archero mod apk ios offline with your device. However, you can still enjoy the game's features and content without any limitations or restrictions.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Aim King 8 Ball Pool APK and Become a Pro in No Time.md b/spaces/1phancelerku/anime-remove-background/Download Aim King 8 Ball Pool APK and Become a Pro in No Time.md
deleted file mode 100644
index a1208e580405044faf4b7c90d06ae346e97712b2..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Aim King 8 Ball Pool APK and Become a Pro in No Time.md
+++ /dev/null
@@ -1,136 +0,0 @@
-
-Aim King 8 Ball Pool APK: A Guide to the Best Tool for 8 Ball Pool Players
-If you are a fan of 8 ball pool, you might have heard of Aim King 8 Ball Pool APK, a tool that can help you improve your skills and win more games. But what is it exactly, and how does it work? In this article, we will answer these questions and more, and give you some tips and tricks on how to use it effectively. Let's get started!
-aim king 8 ball pool apk
Download 🗸 https://jinyurl.com/2uNMUn
- What is Aim King 8 Ball Pool APK?
-Aim King 8 Ball Pool APK is an app that you can download and install on your Android device, and use it while playing 8 ball pool. It is a guide tool that uses AI image recognition technology to display extended guidelines in real time, making your shots more accurate and precise. It also allows you to adjust the sensitivity and accuracy settings according to your preference. With Aim King 8 Ball Pool APK, you can become a master of 8 ball pool in no time!
- Features of Aim King 8 Ball Pool APK
-Some of the features that make Aim King 8 Ball Pool APK stand out from other similar tools are:
-
-- It is compatible with all versions of 8 ball pool, including the latest one.
-- It is easy to use, with a simple and user-friendly interface.
-- It does not require root access or any other permissions.
-- It does not interfere with the game performance or graphics.
-- It does not contain any ads or malware.
-
- How to Download and Install Aim King 8 Ball Pool APK
-To download and install Aim King 8 Ball Pool APK on your Android device, follow these steps:
-
-- Go to [this link] and click on the download button.
-- Wait for the file to be downloaded on your device.
-- Open the file manager app on your device and locate the downloaded file.
-- Tap on the file and allow the installation from unknown sources if prompted.
-- Wait for the installation to be completed.
-- Launch the app and enjoy!
-
- Why Use Aim King 8 Ball Pool APK?
-You might be wondering why you should use Aim King 8 Ball Pool APK instead of playing the game normally. Well, there are some good reasons why you should consider using this tool, as well as some drawbacks that you should be aware of. Let's take a look at them.
- Benefits of Using Aim King 8 Ball Pool APK
-Some of the benefits that you can get from using Aim King 8 Ball Pool APK are:
-aim king 8 ball pool apk download
-aim king 8 ball pool apk mod
-aim king 8 ball pool apk latest version
-aim king 8 ball pool apk free
-aim king 8 ball pool apk android
-aim king 8 ball pool apk ios
-aim king 8 ball pool apk hack
-aim king 8 ball pool apk no root
-aim king 8 ball pool apk unlimited coins
-aim king 8 ball pool apk online
-aim king 8 ball pool apk offline
-aim king 8 ball pool apk for pc
-aim king 8 ball pool apk for laptop
-aim king 8 ball pool apk for windows
-aim king 8 ball pool apk for mac
-aim king 8 ball pool apk review
-aim king 8 ball pool apk features
-aim king 8 ball pool apk guide
-aim king 8 ball pool apk tutorial
-aim king 8 ball pool apk tips
-aim king 8 ball pool apk tricks
-aim king 8 ball pool apk cheats
-aim king 8 ball pool apk best settings
-aim king 8 ball pool apk how to use
-aim king 8 ball pool apk how to install
-aim king 8 ball pool apk how to update
-aim king 8 ball pool apk how to uninstall
-aim king 8 ball pool apk how to play
-aim king 8 ball pool apk how to win
-aim king 8 ball pool apk how to get coins
-aim king 8 ball pool apk comparison
-aim king 8 ball pool apk alternatives
-aim king 8 ball pool apk vs other tools
-aim king 8 ball pool apk pros and cons
-aim king 8 ball pool apk benefits and drawbacks
-aim king 8 ball pool apk advantages and disadvantages
-aim king 8 ball pool apk testimonials and feedbacks
-aim king 8 ball pool apk ratings and reviews
-aim king 8 ball pool apk quality and performance
-aim king 8 ball pool apk reliability and security
-
-- You can improve your skills and confidence in playing 8 ball pool.
-- You can win more games and earn more coins and rewards.
-- You can challenge your friends and other players online and show off your abilities.
-- You can have more fun and enjoyment while playing the game.
-
- Drawbacks of Using Aim King 8 Ball Pool APK
-Some of the drawbacks that you should be aware of when using Aim King 8 Ball Pool APK are:
-
-- You might lose the thrill and challenge of playing the game naturally.
-- You might get addicted to using the tool and lose interest in the game.
-- You might get detected and banned by the game developers if you use the tool excessively or carelessly.
-- You might face ethical issues and criticism from other players who consider using the tool as cheating.
-
- Tips and Tricks for Using Aim King 8 Ball Pool APK
-Now that you know what Aim King 8 Ball Pool APK is and what are its pros and cons, you might want to know how to use it effectively and safely. Here are some tips and tricks that you can follow:
- How to Use the Extended Guidelines Feature
-The extended guidelines feature is the main function of Aim King 8 Ball Pool APK. It shows you the trajectory of your cue ball and the target ball, as well as the angle and distance of your shot. To use this feature, you need to:
-
-- Open the app and grant it permission to access your screen.
-- Open the game and start a match.
-- Tap on the Aim King icon on your screen to activate the extended guidelines.
-- Aim your shot using the guidelines and adjust your power and spin as needed.
-- Tap on the Aim King icon again to deactivate the extended guidelines.
-- Release your shot and watch the result.
-
- How to Adjust the Sensitivity and Accuracy Settings
-The sensitivity and accuracy settings allow you to customize the performance of Aim King 8 Ball Pool APK according to your preference. The sensitivity setting determines how responsive the app is to your touch, while the accuracy setting determines how precise the app is in calculating the guidelines. To adjust these settings, you need to:
-
-- Open the app and tap on the settings icon on the top right corner.
-- Slide the sensitivity bar to increase or decrease the sensitivity level.
-- Slide the accuracy bar to increase or decrease the accuracy level.
-- Tap on the save button to apply your changes.
-
- How to Avoid Detection and Ban by the Game Developers
-One of the risks of using Aim King 8 Ball Pool APK is getting detected and banned by the game developers, who might consider it as a violation of their terms of service. To avoid this, you need to:
-
-- Use the tool sparingly and moderately, and not in every match or shot.
-- Use the tool only in offline mode or in private matches with your friends.
-- Do not brag or boast about using the tool in public chats or forums.
-- Do not update the game or the tool unless you are sure that they are compatible.
-
- Conclusion
-Aim King 8 Ball Pool APK is a great tool for 8 ball pool players who want to improve their skills and win more games. It offers extended guidelines, sensitivity and accuracy settings, and other features that can enhance your gameplay. However, it also has some drawbacks, such as losing the challenge, getting addicted, getting banned, and facing ethical issues. Therefore, you should use it wisely and responsibly, and not rely on it too much. Remember, it is just a tool, not a magic wand!
- Summary of the Main Points
-In this article, we have discussed:
-
-- What is Aim King 8 Ball Pool APK and how does it work?
-- Why use Aim King 8 Ball Pool APK and what are its benefits and drawbacks?
-- How to use Aim King 8 Ball Pool APK effectively and safely?
-
- Call to Action for the Readers
-If you are interested in trying out Aim King 8 Ball Pool APK, you can download it from [this link] and follow the instructions we have provided. However, if you are not comfortable with using it or you prefer playing the game naturally, you can skip it and enjoy 8 ball pool without any tools. The choice is yours!
- FAQs
-Here are some frequently asked questions about Aim King 8 Ball Pool APK:
- Is Aim King 8 Ball Pool APK safe to use?
-Aim King 8 Ball Pool APK is safe to use as long as you download it from a trusted source and follow our tips on how to avoid detection and ban by the game developers. However, you should always be careful when using any third-party app that modifies the game, as there is always a possibility of malware or virus infection, data theft, or account suspension.
- Does Aim King 8 Ball Pool APK work on iOS devices?
-No, Aim King 8 Ball Pool APK is only compatible with Android devices. If you have an iOS device, you will not be able to use this tool. However, there might be other similar tools that work on iOS devices, but we cannot guarantee their safety or effectiveness.
- Can I use Aim King 8 Ball Pool APK with other 8 ball pool tools or mods?
-We do not recommend using Aim King 8 Ball Pool APK with other 8 ball pool tools or mods, as they might conflict with each other and cause errors or glitches in the game. Moreover, using multiple tools or mods might increase the chance of getting detected and banned by the game developers. Therefore, you should use Aim King 8 Ball Pool APK alone and disable any other tools or mods that you have installed.
- How can I contact the developer of Aim King 8 Ball Pool APK?
-If you have any questions, feedback, or suggestions about Aim King 8 Ball Pool APK, you can contact the developer through their email address: aimking8ballpool@gmail.com. They will try to respond to your queries as soon as possible.
- Where can I find more information about Aim King 8 Ball Pool APK?
-If you want to learn more about Aim King 8 Ball Pool APK, you can visit their official website: [this link]. There, you can find more details about the app, such as its features, screenshots, videos, reviews, and updates. You can also join their community and interact with other users who use the app.
-
-
\ No newline at end of file
diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py
deleted file mode 100644
index c6d3b9c240c24687d432197f976ee01fbf423216..0000000000000000000000000000000000000000
--- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py
+++ /dev/null
@@ -1,187 +0,0 @@
-import torch
-from torch import nn
-
-__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']
-
-
-def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
- """3x3 convolution with padding"""
- return nn.Conv2d(in_planes,
- out_planes,
- kernel_size=3,
- stride=stride,
- padding=dilation,
- groups=groups,
- bias=False,
- dilation=dilation)
-
-
-def conv1x1(in_planes, out_planes, stride=1):
- """1x1 convolution"""
- return nn.Conv2d(in_planes,
- out_planes,
- kernel_size=1,
- stride=stride,
- bias=False)
-
-
-class IBasicBlock(nn.Module):
- expansion = 1
- def __init__(self, inplanes, planes, stride=1, downsample=None,
- groups=1, base_width=64, dilation=1):
- super(IBasicBlock, self).__init__()
- if groups != 1 or base_width != 64:
- raise ValueError('BasicBlock only supports groups=1 and base_width=64')
- if dilation > 1:
- raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
- self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
- self.conv1 = conv3x3(inplanes, planes)
- self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
- self.prelu = nn.PReLU(planes)
- self.conv2 = conv3x3(planes, planes, stride)
- self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
- self.downsample = downsample
- self.stride = stride
-
- def forward(self, x):
- identity = x
- out = self.bn1(x)
- out = self.conv1(out)
- out = self.bn2(out)
- out = self.prelu(out)
- out = self.conv2(out)
- out = self.bn3(out)
- if self.downsample is not None:
- identity = self.downsample(x)
- out += identity
- return out
-
-
-class IResNet(nn.Module):
- fc_scale = 7 * 7
- def __init__(self,
- block, layers, dropout=0, num_features=512, zero_init_residual=False,
- groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):
- super(IResNet, self).__init__()
- self.fp16 = fp16
- self.inplanes = 64
- self.dilation = 1
- if replace_stride_with_dilation is None:
- replace_stride_with_dilation = [False, False, False]
- if len(replace_stride_with_dilation) != 3:
- raise ValueError("replace_stride_with_dilation should be None "
- "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
- self.groups = groups
- self.base_width = width_per_group
- self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
- self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
- self.prelu = nn.PReLU(self.inplanes)
- self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
- self.layer2 = self._make_layer(block,
- 128,
- layers[1],
- stride=2,
- dilate=replace_stride_with_dilation[0])
- self.layer3 = self._make_layer(block,
- 256,
- layers[2],
- stride=2,
- dilate=replace_stride_with_dilation[1])
- self.layer4 = self._make_layer(block,
- 512,
- layers[3],
- stride=2,
- dilate=replace_stride_with_dilation[2])
- self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
- self.dropout = nn.Dropout(p=dropout, inplace=True)
- self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
- self.features = nn.BatchNorm1d(num_features, eps=1e-05)
- nn.init.constant_(self.features.weight, 1.0)
- self.features.weight.requires_grad = False
-
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- nn.init.normal_(m.weight, 0, 0.1)
- elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
- nn.init.constant_(m.weight, 1)
- nn.init.constant_(m.bias, 0)
-
- if zero_init_residual:
- for m in self.modules():
- if isinstance(m, IBasicBlock):
- nn.init.constant_(m.bn2.weight, 0)
-
- def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
- downsample = None
- previous_dilation = self.dilation
- if dilate:
- self.dilation *= stride
- stride = 1
- if stride != 1 or self.inplanes != planes * block.expansion:
- downsample = nn.Sequential(
- conv1x1(self.inplanes, planes * block.expansion, stride),
- nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
- )
- layers = []
- layers.append(
- block(self.inplanes, planes, stride, downsample, self.groups,
- self.base_width, previous_dilation))
- self.inplanes = planes * block.expansion
- for _ in range(1, blocks):
- layers.append(
- block(self.inplanes,
- planes,
- groups=self.groups,
- base_width=self.base_width,
- dilation=self.dilation))
-
- return nn.Sequential(*layers)
-
- def forward(self, x):
- with torch.cuda.amp.autocast(self.fp16):
- x = self.conv1(x)
- x = self.bn1(x)
- x = self.prelu(x)
- x = self.layer1(x)
- x = self.layer2(x)
- x = self.layer3(x)
- x = self.layer4(x)
- x = self.bn2(x)
- x = torch.flatten(x, 1)
- x = self.dropout(x)
- x = self.fc(x.float() if self.fp16 else x)
- x = self.features(x)
- return x
-
-
-def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
- model = IResNet(block, layers, **kwargs)
- if pretrained:
- raise ValueError()
- return model
-
-
-def iresnet18(pretrained=False, progress=True, **kwargs):
- return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
- progress, **kwargs)
-
-
-def iresnet34(pretrained=False, progress=True, **kwargs):
- return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
- progress, **kwargs)
-
-
-def iresnet50(pretrained=False, progress=True, **kwargs):
- return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
- progress, **kwargs)
-
-
-def iresnet100(pretrained=False, progress=True, **kwargs):
- return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
- progress, **kwargs)
-
-
-def iresnet200(pretrained=False, progress=True, **kwargs):
- return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,
- progress, **kwargs)
-
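For orientation, a minimal usage sketch of the backbone defined above (not part of the original file): it assumes 112x112 face crops, which is consistent with `fc_scale = 7 * 7` given the four stride-2 stages.

```python
# Hypothetical usage of iresnet.py above; the 112x112 input size is an
# assumption consistent with fc_scale = 7 * 7 (the net downsamples by 16).
import torch
from iresnet import iresnet50  # assumes the file above is saved as iresnet.py

model = iresnet50(num_features=512)
model.eval()
with torch.no_grad():
    faces = torch.randn(2, 3, 112, 112)  # dummy batch of face crops
    embeddings = model(faces)            # -> tensor of shape [2, 512]
print(embeddings.shape)
```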
diff --git a/spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/got.py b/spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/got.py
deleted file mode 100644
index 242c8de34d45366fb52c5ae22f8e18ea0d8cb779..0000000000000000000000000000000000000000
--- a/spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/got.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import networkx as nx
-import matplotlib.pyplot as plt
-from pyvis.network import Network
-import pandas as pd
-import streamlit as st
-
-
-def got_func(physics):
- got_net = Network(height="600px", width="100%", font_color="black",heading='Game of Thrones Graph')
-
-# set the physics layout of the network
- got_net.barnes_hut()
- got_data = pd.read_csv("stormofswords.csv")
- #got_data = pd.read_csv("stormofswords.csv")
- #got_data.rename(index={0: "Source", 1: "Target", 2: "Weight"})
- sources = got_data['Source']
- targets = got_data['Target']
- weights = got_data['Weight']
-
- edge_data = zip(sources, targets, weights)
-
- for e in edge_data:
- src = e[0]
- dst = e[1]
- w = e[2]
-
- got_net.add_node(src, src, title=src)
- got_net.add_node(dst, dst, title=dst)
- got_net.add_edge(src, dst, value=w)
-
- neighbor_map = got_net.get_adj_list()
-
-# add neighbor data to node hover data
- for node in got_net.nodes:
- node["title"] += " Neighbors:
" + "
".join(neighbor_map[node["id"]])
- node["value"] = len(neighbor_map[node["id"]])
- if physics:
- got_net.show_buttons(filter_=['physics'])
- got_net.show("gameofthrones.html")
-
-
-def simple_func(physics):
- nx_graph = nx.cycle_graph(10)
- nx_graph.nodes[1]['title'] = 'Number 1'
- nx_graph.nodes[1]['group'] = 1
- nx_graph.nodes[3]['title'] = 'I belong to a different group!'
- nx_graph.nodes[3]['group'] = 10
- nx_graph.add_node(20, size=20, title='couple', group=2)
- nx_graph.add_node(21, size=15, title='couple', group=2)
- nx_graph.add_edge(20, 21, weight=5)
- nx_graph.add_node(25, size=25, label='lonely', title='lonely node', group=3)
-
-
- nt = Network("500px", "500px",notebook=True,heading='')
- nt.from_nx(nx_graph)
- #physics=st.sidebar.checkbox('add physics interactivity?')
- if physics:
- nt.show_buttons(filter_=['physics'])
- nt.show('test.html')
-
-
-def karate_func(physics):
- G = nx.karate_club_graph()
-
-
- nt = Network("500px", "500px",notebook=True,heading='Zachary’s Karate Club graph')
- nt.from_nx(G)
- #physics=st.sidebar.checkbox('add physics interactivity?')
- if physics:
- nt.show_buttons(filter_=['physics'])
- nt.show('karate.html')
\ No newline at end of file
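These helpers only write PyVis HTML files to disk; the sketch below shows one way the output might be embedded in a Streamlit page. The wiring (and the use of `streamlit.components.v1`) is an assumption, since the companion app is not part of this diff.

```python
# Hypothetical Streamlit wiring for got.py above; "test.html" is the file
# that simple_func() itself writes via nt.show('test.html').
import streamlit as st
import streamlit.components.v1 as components
from got import simple_func

physics = st.sidebar.checkbox("add physics interactivity?")
simple_func(physics)
with open("test.html", "r", encoding="utf-8") as f:
    components.html(f.read(), height=550, scrolling=True)
```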
diff --git a/spaces/AIWaves/Software_Company/src/agents/Component/PromptComponent.py b/spaces/AIWaves/Software_Company/src/agents/Component/PromptComponent.py
deleted file mode 100644
index dc590d4734e14cad93ab5560cb7b4f08bd45c416..0000000000000000000000000000000000000000
--- a/spaces/AIWaves/Software_Company/src/agents/Component/PromptComponent.py
+++ /dev/null
@@ -1,133 +0,0 @@
-from abc import abstractmethod
-
-
-class PromptComponent:
- def __init__(self):
- pass
-
- @abstractmethod
- def get_prompt(self, agent):
- pass
-
-class TaskComponent(PromptComponent):
- def __init__(self, task):
- super().__init__()
- self.task = task
-
- def get_prompt(self, agent):
- return f"""The task you need to execute is: {self.task}.\n"""
-
-
-class OutputComponent(PromptComponent):
- def __init__(self, output):
- super().__init__()
- self.output = output
-
- def get_prompt(self, agent):
- return f"""Please contact the above to extract <{self.output}> and {self.output}>, \
- do not perform additional output, please output in strict accordance with the above format!\n"""
-
-
-class SystemComponent(PromptComponent):
- def __init__(self,system_prompt):
- super().__init__()
- self.system_prompt = system_prompt
-
- def get_prompt(self, agent):
- return self.system_prompt
-
-class LastComponent(PromptComponent):
- def __init__(self, last_prompt):
- super().__init__()
- self.last_prompt = last_prompt
-
- def get_prompt(self, agent):
- return self.last_prompt
-
-
-class StyleComponent(PromptComponent):
- """
-    Role and style component.
- """
-
- def __init__(self, role):
- super().__init__()
- self.role = role
-
- def get_prompt(self, agent):
- name = agent.name
- style = agent.style
- return f"""Now your role is:\n{self.role}, your name is:\n{name}. \
-        You need to follow the output style:\n{style}.\n"""
-
-
-class RuleComponent(PromptComponent):
- def __init__(self, rule):
- super().__init__()
- self.rule = rule
-
- def get_prompt(self, agent):
- return f"""The rule you need to follow is:\n{self.rule}.\n"""
-
-
-class DemonstrationComponent(PromptComponent):
- """
-    Input: a list of demonstration strings (example answers).
- """
-
- def __init__(self, demonstrations):
- super().__init__()
- self.demonstrations = demonstrations
-
- def add_demonstration(self, demonstration):
- self.demonstrations.append(demonstration)
-
- def get_prompt(self, agent):
- prompt = "Here are demonstrations you can refer to:\n"
- for demonstration in self.demonstrations:
- prompt += "\n" + demonstration
- prompt += "\n"
- return prompt
-
-
-class CoTComponent(PromptComponent):
- """
-    Input: a list of demonstration strings (example answers).
- """
-
- def __init__(self, demonstrations):
- super().__init__()
- self.demonstrations = demonstrations
-
- def add_demonstration(self, demonstration):
- self.demonstrations.append(demonstration)
-
- def get_prompt(self, agent):
- prompt = "You need to think in detail before outputting, the thinking case is as follows:\n"
- for demonstration in self.demonstrations:
- prompt += "\n" + demonstration
- prompt += "\n"
- return prompt
-
-
-class CustomizeComponent(PromptComponent):
- """
- Custom template
- template(str) : example: "i am {}"
- keywords(list) : example : ["name"]
- example : agent.environment.shared_memory["name"] = "Lilong"
- the component will get the keyword attribute from the environment, and then add it to the template.
- Return : "i am Lilong"
- """
- def __init__(self, template, keywords) -> None:
- super().__init__()
- self.template = template
- self.keywords = keywords
-
- def get_prompt(self, agent):
- template_keyword = {}
- for keyword in self.keywords:
-
- current_keyword = agent.environment.shared_memory[keyword]
- template_keyword[keyword] = current_keyword
- return self.template.format(**template_keyword)
\ No newline at end of file
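To make the component contract concrete, here is a small composition sketch; the `SimpleNamespace` agent and environment are hypothetical stand-ins that only provide the attributes `get_prompt` reads, not the real Agent classes from this repository.

```python
# Hypothetical composition sketch for the components above; the agent and
# environment objects are stand-ins, not the repository's Agent classes.
from types import SimpleNamespace

env = SimpleNamespace(shared_memory={"name": "Lilong"})
agent = SimpleNamespace(name="coder", style="concise", environment=env)

components = [
    SystemComponent("You are a careful software engineer."),
    TaskComponent("implement a calculator"),
    CustomizeComponent("i am {name}", ["name"]),
    OutputComponent("code"),
]
prompt = "".join(c.get_prompt(agent) for c in components)
print(prompt)
```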
diff --git a/spaces/AP123/dreamgaussian/cam_utils.py b/spaces/AP123/dreamgaussian/cam_utils.py
deleted file mode 100644
index 05e730690fcdab48255c73b0f8298ce165149758..0000000000000000000000000000000000000000
--- a/spaces/AP123/dreamgaussian/cam_utils.py
+++ /dev/null
@@ -1,146 +0,0 @@
-import numpy as np
-from scipy.spatial.transform import Rotation as R
-
-import torch
-
-def dot(x, y):
- if isinstance(x, np.ndarray):
- return np.sum(x * y, -1, keepdims=True)
- else:
- return torch.sum(x * y, -1, keepdim=True)
-
-
-def length(x, eps=1e-20):
- if isinstance(x, np.ndarray):
- return np.sqrt(np.maximum(np.sum(x * x, axis=-1, keepdims=True), eps))
- else:
- return torch.sqrt(torch.clamp(dot(x, x), min=eps))
-
-
-def safe_normalize(x, eps=1e-20):
- return x / length(x, eps)
-
-
-def look_at(campos, target, opengl=True):
- # campos: [N, 3], camera/eye position
- # target: [N, 3], object to look at
- # return: [N, 3, 3], rotation matrix
- if not opengl:
- # camera forward aligns with -z
- forward_vector = safe_normalize(target - campos)
- up_vector = np.array([0, 1, 0], dtype=np.float32)
- right_vector = safe_normalize(np.cross(forward_vector, up_vector))
- up_vector = safe_normalize(np.cross(right_vector, forward_vector))
- else:
- # camera forward aligns with +z
- forward_vector = safe_normalize(campos - target)
- up_vector = np.array([0, 1, 0], dtype=np.float32)
- right_vector = safe_normalize(np.cross(up_vector, forward_vector))
- up_vector = safe_normalize(np.cross(forward_vector, right_vector))
- R = np.stack([right_vector, up_vector, forward_vector], axis=1)
- return R
-
-
-# elevation & azimuth to pose (cam2world) matrix
-def orbit_camera(elevation, azimuth, radius=1, is_degree=True, target=None, opengl=True):
- # radius: scalar
- # elevation: scalar, in (-90, 90), from +y to -y is (-90, 90)
- # azimuth: scalar, in (-180, 180), from +z to +x is (0, 90)
- # return: [4, 4], camera pose matrix
- if is_degree:
- elevation = np.deg2rad(elevation)
- azimuth = np.deg2rad(azimuth)
- x = radius * np.cos(elevation) * np.sin(azimuth)
- y = - radius * np.sin(elevation)
- z = radius * np.cos(elevation) * np.cos(azimuth)
- if target is None:
- target = np.zeros([3], dtype=np.float32)
- campos = np.array([x, y, z]) + target # [3]
- T = np.eye(4, dtype=np.float32)
- T[:3, :3] = look_at(campos, target, opengl)
- T[:3, 3] = campos
- return T
-
-
-class OrbitCamera:
- def __init__(self, W, H, r=2, fovy=60, near=0.01, far=100):
- self.W = W
- self.H = H
- self.radius = r # camera distance from center
- self.fovy = np.deg2rad(fovy) # deg 2 rad
- self.near = near
- self.far = far
- self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point
- self.rot = R.from_matrix(np.eye(3))
- self.up = np.array([0, 1, 0], dtype=np.float32) # need to be normalized!
-
- @property
- def fovx(self):
- return 2 * np.arctan(np.tan(self.fovy / 2) * self.W / self.H)
-
- @property
- def campos(self):
- return self.pose[:3, 3]
-
- # pose (c2w)
- @property
- def pose(self):
- # first move camera to radius
- res = np.eye(4, dtype=np.float32)
- res[2, 3] = self.radius # opengl convention...
- # rotate
- rot = np.eye(4, dtype=np.float32)
- rot[:3, :3] = self.rot.as_matrix()
- res = rot @ res
- # translate
- res[:3, 3] -= self.center
- return res
-
- # view (w2c)
- @property
- def view(self):
- return np.linalg.inv(self.pose)
-
- # projection (perspective)
- @property
- def perspective(self):
- y = np.tan(self.fovy / 2)
- aspect = self.W / self.H
- return np.array(
- [
- [1 / (y * aspect), 0, 0, 0],
- [0, -1 / y, 0, 0],
- [
- 0,
- 0,
- -(self.far + self.near) / (self.far - self.near),
- -(2 * self.far * self.near) / (self.far - self.near),
- ],
- [0, 0, -1, 0],
- ],
- dtype=np.float32,
- )
-
- # intrinsics
- @property
- def intrinsics(self):
- focal = self.H / (2 * np.tan(self.fovy / 2))
- return np.array([focal, focal, self.W // 2, self.H // 2], dtype=np.float32)
-
- @property
- def mvp(self):
- return self.perspective @ np.linalg.inv(self.pose) # [4, 4]
-
- def orbit(self, dx, dy):
- # rotate along camera up/side axis!
- side = self.rot.as_matrix()[:3, 0]
- rotvec_x = self.up * np.radians(-0.05 * dx)
- rotvec_y = side * np.radians(-0.05 * dy)
- self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot
-
- def scale(self, delta):
- self.radius *= 1.1 ** (-delta)
-
- def pan(self, dx, dy, dz=0):
- # pan in camera coordinate system (careful on the sensitivity!)
- self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([-dx, -dy, dz])
\ No newline at end of file
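A short illustrative sketch of how the two pieces above fit together (values are arbitrary; the helpers are assumed importable from `cam_utils.py`):

```python
# Illustrative only; numbers are arbitrary and the module name is assumed.
from cam_utils import orbit_camera, OrbitCamera

pose = orbit_camera(elevation=20, azimuth=45, radius=2.5)  # [4, 4] cam2world
cam = OrbitCamera(W=800, H=600, r=2.5, fovy=60)
print(pose.shape)      # (4, 4)
print(cam.intrinsics)  # [fx, fy, cx, cy]
print(cam.mvp.shape)   # (4, 4) model-view-projection matrix
```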
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py
deleted file mode 100644
index 58f6fe4cf25e8f0b3d321a7aab4b746552aa4163..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = ['./resnet50_8xb32_in1k.py']
-
-# schedule settings
-optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic')
diff --git a/spaces/Abhi5ingh/fashionsd/app.py b/spaces/Abhi5ingh/fashionsd/app.py
deleted file mode 100644
index f8308709c45a14de3dea96f5bfc4d87bac31c940..0000000000000000000000000000000000000000
--- a/spaces/Abhi5ingh/fashionsd/app.py
+++ /dev/null
@@ -1,168 +0,0 @@
-from typing import Optional
-
-import numpy as np
-import cv2
-import streamlit as st
-from PIL import Image
-import os
-import tempfile
-
-from sdfile import PIPELINES, generate
-
-DEFAULT_PROMPT = "belted shirt black belted portrait-collar wrap blouse with black prints"
-DEFAULT_WIDTH, DEFAULT_HEIGHT = 512, 512
-OUTPUT_IMAGE_KEY = "output_img"
-LOADED_IMAGE_KEY = "loaded_img"
-
-def get_image(key: str) -> Optional[Image.Image]:
- if key in st.session_state:
- return st.session_state[key]
- return None
-
-def set_image(key:str, img: Image.Image):
- st.session_state[key] = img
-
-def prompt_and_generate_button(prefix, pipeline_name: PIPELINES, **kwargs):
- prompt = st.text_area(
- "Prompt",
- value = DEFAULT_PROMPT,
- key = f"{prefix}-prompt"
- )
- negative_prompt = st.text_area(
- "Negative prompt",
- value = "",
- key =f"{prefix}-negative_prompt",
- )
- col1,col2 =st.columns(2)
- with col1:
- steps = st.slider(
- "Number of inference steps",
- min_value=1,
- max_value=200,
- value=30,
- key=f"{prefix}-inference-steps",
- )
- with col2:
- guidance_scale = st.slider(
- "Guidance scale",
- min_value=0.0,
- max_value=20.0,
- value= 7.5,
- step = 0.5,
- key=f"{prefix}-guidance-scale",
- )
- enable_cpu_offload = st.checkbox(
- "Enable CPU offload if you run out of memory",
- key =f"{prefix}-cpu-offload",
- value= False,
- )
-
- if st.button("Generate Image", key = f"{prefix}-btn"):
- with st.spinner("Generating image ..."):
- image = generate(
- prompt,
- pipeline_name,
- negative_prompt=negative_prompt,
- num_inference_steps=steps,
- guidance_scale=guidance_scale,
- enable_cpu_offload=enable_cpu_offload,
- **kwargs,
- )
- set_image(OUTPUT_IMAGE_KEY,image.copy())
- st.image(image)
-def width_and_height_sliders(prefix):
- col1, col2 = st.columns(2)
- with col1:
- width = st.slider(
- "Width",
- min_value=64,
- max_value=1600,
- step=16,
- value=512,
- key=f"{prefix}-width",
- )
- with col2:
- height = st.slider(
- "Height",
- min_value=64,
- max_value=1600,
- step=16,
- value=512,
- key=f"{prefix}-height",
- )
- return width, height
-
-def image_uploader(prefix):
- image = st.file_uploader("Image", ["jpg", "png"], key=f"{prefix}-uploader")
- if image:
- image = Image.open(image)
- print(f"loaded input image of size ({image.width}, {image.height})")
- return image
-
- return get_image(LOADED_IMAGE_KEY)
-
-def sketching():
- image = image_uploader("sketch2img")
-
- if not image:
- return None,None
-
- with tempfile.TemporaryDirectory() as temp_dir:
- temp_image_path = os.path.join(temp_dir, "uploaded_image.jpg")
- image.save(temp_image_path)
-
- image = cv2.imread(temp_image_path)
- image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
- image_blur = cv2.GaussianBlur(image,(5,5),0)
- sketch = cv2.adaptiveThreshold(image_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
- sketch_pil = Image.fromarray(sketch)
- return sketch_pil
-
-def txt2img_tab():
- prefix = "txt2img"
- width, height = width_and_height_sliders(prefix)
- prompt_and_generate_button(prefix,"txt2img",width=width,height=height)
-
-def sketching_tab():
- prefix = "sketch2img"
- col1,col2 = st.columns(2)
- with col1:
- image = sketching()
- with col2:
- if image:
- controlnet_conditioning_scale = st.slider(
- "Strength or dependence on the input sketch",
- min_value=0.0,
- max_value= 1.0,
- value = 0.5,
- step = 0.05,
- key=f"{prefix}-controlnet_conditioning_scale",
- )
- prompt_and_generate_button(
- prefix,
- "sketch2img",
- image=image,
- controlnet_conditioning_scale=controlnet_conditioning_scale,
- )
-
-def main():
- st.set_page_config(layout="wide")
- st.title("Fashion-SDX: Playground")
-
- tab1,tab2 = st.tabs(
- ["Text to image", "Sketch to image"]
- )
- with tab1:
- txt2img_tab()
- with tab2:
- sketching_tab()
-
- with st.sidebar:
- st.header("Most Recent Output Image")
-        output_image = get_image(OUTPUT_IMAGE_KEY)
- if output_image:
- st.image(output_image)
- else:
- st.markdown("no output generated yet")
-if __name__ =="__main__":
- main()
\ No newline at end of file
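The `sketching()` helper above reduces an uploaded photo to a line sketch before it is passed to the ControlNet-conditioned pipeline. A standalone version of that preprocessing step looks roughly like this ("input.jpg" is a placeholder path):

```python
# Standalone sketch of the preprocessing used in sketching() above;
# "input.jpg" and "sketch.png" are placeholder paths.
import cv2
from PIL import Image

gray = cv2.cvtColor(cv2.imread("input.jpg"), cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
edges = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                              cv2.THRESH_BINARY, 11, 2)
Image.fromarray(edges).save("sketch.png")
```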
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/hsladjustpipeline.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/hsladjustpipeline.js
deleted file mode 100644
index ff1f2f4cd96863e0c3624659c017a2931a50d48b..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/hsladjustpipeline.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import HslAdjustPostFxPipeline from './shaders/hsladjust/HslAdjustPostFxPipeline.js';
-export default HslAdjustPostFxPipeline;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/IsInTouching.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/IsInTouching.js
deleted file mode 100644
index 53f39a744ab128e3a2d2aa0c0a293351b471f9ca..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/IsInTouching.js
+++ /dev/null
@@ -1,19 +0,0 @@
-import IsPointerInBounds from '../../../plugins/utils/input/IsPointerInBounds.js';
-import IsGameObject from '../../../plugins/utils/system/IsGameObject.js';
-
-var IsInTouching = function (pointer, gameObject) {
- if (IsGameObject(pointer) || (typeof (pointer) === 'string')) {
- gameObject = pointer;
- pointer = undefined;
- }
-
- if (gameObject === undefined) {
- gameObject = this;
- } else if (typeof (gameObject) === 'string') {
- gameObject = this.getElement(gameObject);
- }
-
- return IsPointerInBounds(gameObject, pointer);
-}
-
-export default IsInTouching;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/InjectProperties.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/InjectProperties.js
deleted file mode 100644
index c1080c1089521c3cc5ad9dfb73bfe661a0567c06..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/InjectProperties.js
+++ /dev/null
@@ -1,32 +0,0 @@
-var InjectProperties = function (table) {
- Object.defineProperty(table, 'childOY', {
- configurable: true,
- get: function () {
- return table.tableOY;
- },
- set: function (value) {
- table.tableOY = value;
- }
- });
- Object.defineProperty(table, 'topChildOY', {
- get: function () {
- return table.topTableOY;
- }
- });
- Object.defineProperty(table, 'bottomChildOY', {
- get: function () {
- return table.bottomTableOY;
- }
- });
- Object.defineProperty(table, 'childVisibleHeight', {
- get: function () {
- return table.instHeight;
- }
- });
- Object.defineProperty(table, 'childHeight', {
- get: function () {
- return table.tableHeight;
- }
- });
-};
-export default InjectProperties;
\ No newline at end of file
diff --git a/spaces/AlexWelcing/MusicLM/__init__.py b/spaces/AlexWelcing/MusicLM/__init__.py
deleted file mode 100644
index d9887dd3cb19259dc28969383604022640eb37db..0000000000000000000000000000000000000000
--- a/spaces/AlexWelcing/MusicLM/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from musiclm_pytorch.musiclm_pytorch import MuLaN, MuLaNEmbedQuantizer, MusicLM
-
-from musiclm_pytorch.musiclm_pytorch import AudioSpectrogramTransformer, TextTransformer
\ No newline at end of file
diff --git a/spaces/Ali-Omrani/CCR/app.py b/spaces/Ali-Omrani/CCR/app.py
deleted file mode 100644
index c537a248fdb07bb61ec0e87a08f3210be3909716..0000000000000000000000000000000000000000
--- a/spaces/Ali-Omrani/CCR/app.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import pickle
-import os
-import gradio as gr
-import pandas as pd
-from sentence_transformers import SentenceTransformer, util
-
-def encode_column(model, filename, col_name):
- df = pd.read_csv(filename)
- df["embedding"] = list(model.encode(df[col_name]))
- return df
-
-def item_level_ccr(data_encoded_df, questionnaire_encoded_df):
- q_embeddings = questionnaire_encoded_df.embedding
- d_embeddings = data_encoded_df.embedding
- similarities = util.pytorch_cos_sim(d_embeddings, q_embeddings)
- for i in range(1,len(questionnaire_encoded_df)+1):
- data_encoded_df["sim_item_{}".format(i)] = similarities[:, i-1]
- return data_encoded_df
-
-# encoding questionnaire
-def ccr_wrapper(data_file, data_col, q_file, q_col, model='all-MiniLM-L6-v2'):
- """
- Returns a Dataframe that is the content of data_file with one additional column for CCR value per question
-
- Parameters:
- data_file (str): path to the file containing user text
- data_col (str): column that includes user text
- q_file (str): path to the file containing questionnaires
- q_col (str): column that includes questions
- model (str): name of the SBERT model to use for CCR see https://www.sbert.net/docs/pretrained_models.html for full list
-
- """
- try:
- model = SentenceTransformer(model)
- except:
- print("model name was not included, using all-MiniLM-L6-v2")
- model = SentenceTransformer('all-MiniLM-L6-v2')
-
- questionnaire_filename = q_file.name
- data_filename = data_file.name
-
- q_encoded_df = encode_column(model, questionnaire_filename, q_col)
- data_encoded_df = encode_column(model, data_filename, data_col)
- ccr_df = item_level_ccr(data_encoded_df, q_encoded_df)
-
-
- ccr_df.to_csv("ccr_results.csv")
- return "ccr_results.csv"
-
-
-
-def read_dataframe(data_file, data_col, q_file, q_col):
-
- # df = pd.read_csv(data_file.name)
- return data_file.name
-
-
-
-def single_text_ccr(text, question):
- model = SentenceTransformer('all-MiniLM-L6-v2')
- text_embedding = model.encode(text)
- question_embedding = model.encode(question)
- return round(util.pytorch_cos_sim(text_embedding, question_embedding).item(),3)
-
-
-
-
-
-with gr.Blocks() as demo:
- # gr.Markdown('This is the first page for CCR, info goes here!')
-    gr.Markdown("""Contextual Construct Representations""")
-    gr.Markdown("""Ali Omrani and Mohammad Atari""")
-
-    gr.Markdown("""Play around with your items!""")
-
- with gr.Row():
- user_txt = gr.Textbox(label="Input Text", placeholder="Enter your desired text here ...")
- question = gr.Textbox(label="Question", placeholder="Enter the question text here ...")
-
- submit2 = gr.Button("Get CCR for this Text!")
-
- submit2.click(single_text_ccr, inputs=[user_txt, question], outputs=gr.Textbox(label="CCR Value"))
-
-    gr.Markdown("""Or process a whole file!""")
-
- with gr.Row():
- model_name = gr.Dropdown(label="Choose the Model",
- choices=["all-mpnet-base-v2","multi-qa-mpnet-base-dot-v1", "distiluse-base-multilingual-cased-v2",
- "distiluse-base-multilingual-cased-v1", "paraphrase-MiniLM-L3-v2", "paraphrase-multilingual-MiniLM-L12-v2",
- "paraphrase-albert-small-v2", "paraphrase-multilingual-mpnet-base-v2", "multi-qa-MiniLM-L6-cos-v1",
- "all-MiniLM-L6-v2", "multi-qa-distilbert-cos-v1", "all-MiniLM-L12-v2", "all-distilroberta-v1"])
- with gr.Row():
- with gr.Column():
- user_data = gr.File(label="Participant Data File")
- text_col = gr.Textbox(label="Text Column", placeholder="text column ... ")
- with gr.Column():
- questionnaire_data = gr.File(label="Questionnaire File")
- q_col = gr.Textbox(label="Question Column", placeholder="questionnaire column ... ")
-
- submit = gr.Button("Get CCR!")
-
-    outputs = gr.File()
-    submit.click(ccr_wrapper, inputs=[user_data, text_col, questionnaire_data, q_col, model_name], outputs=[outputs])
-demo.launch()
\ No newline at end of file
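For readers skimming the diff, the core of the removed CCR demo is just SBERT encoding plus cosine similarity. Below is a minimal, self-contained sketch of that item-level computation; the example sentences and column names are illustrative assumptions, not part of the deleted app.

```python
# Item-level CCR, mirroring encode_column() / item_level_ccr() from the deleted app.py.
import pandas as pd
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")

# Hypothetical inputs standing in for the uploaded CSV files.
data = pd.DataFrame({"text": ["I value loyalty to my family above all."]})
questionnaire = pd.DataFrame({"question": ["How important is loyalty to you?"]})

d_emb = model.encode(data["text"].tolist())
q_emb = model.encode(questionnaire["question"].tolist())

# One similarity column per questionnaire item.
sims = util.pytorch_cos_sim(d_emb, q_emb)
for i in range(q_emb.shape[0]):
    data[f"sim_item_{i + 1}"] = sims[:, i].numpy()

print(data)
```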
diff --git a/spaces/Alican/pixera/models/__init__.py b/spaces/Alican/pixera/models/__init__.py
deleted file mode 100644
index fc01113da66ff042bd1807b5bfdb70c4bce8d14c..0000000000000000000000000000000000000000
--- a/spaces/Alican/pixera/models/__init__.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""This package contains modules related to objective functions, optimizations, and network architectures.
-
-To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
-You need to implement the following five functions:
- -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-    -- <set_input>: unpack data from dataset and apply preprocessing.
-    -- <forward>: produce intermediate results.
-    -- <optimize_parameters>: calculate loss, gradients, and update network weights.
-    -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
-
-In the function <__init__>, you need to define four lists:
- -- self.loss_names (str list): specify the training losses that you want to plot and save.
- -- self.model_names (str list): define networks used in our training.
- -- self.visual_names (str list): specify the images that you want to display and save.
-    -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
-
-Now you can use the model class by specifying flag '--model dummy'.
-See our template model class 'template_model.py' for more details.
-"""
-
-import importlib
-from models.base_model import BaseModel
-
-
-def find_model_using_name(model_name):
- """Import the module "models/[model_name]_model.py".
-
-    In the file, the class whose name matches "[model_name]Model" (ignoring
-    underscores and case) will be instantiated. It has to be a subclass of
-    BaseModel.
- """
- model_filename = "models." + model_name + "_model"
- modellib = importlib.import_module(model_filename)
- model = None
- target_model_name = model_name.replace('_', '') + 'model'
- for name, cls in modellib.__dict__.items():
- if name.lower() == target_model_name.lower() \
- and issubclass(cls, BaseModel):
- model = cls
-
- if model is None:
- print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
- exit(0)
-
- return model
-
-
-def get_option_setter(model_name):
- """Return the static method of the model class."""
- model_class = find_model_using_name(model_name)
- return model_class.modify_commandline_options
-
-
-def create_model(opt):
- """Create a model given the option.
-
-    This function instantiates the model class found by find_model_using_name().
-    This is the main interface between this package and 'train.py'/'test.py'.
-
- Example:
- >>> from models import create_model
- >>> model = create_model(opt)
- """
- model = find_model_using_name(opt.model)
- instance = model(opt)
- print("model [%s] was created" % type(instance).__name__)
- return instance
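The docstrings above describe a lookup-by-naming-convention pattern: import `models/<name>_model.py` and pick the class whose lowercased name equals `<name>model`. A generic sketch of that pattern follows; the function and parameter names are illustrative, not part of the deleted package.

```python
import importlib


def find_class_by_convention(package: str, model_name: str, base_class: type):
    """Import `<package>.<model_name>_model` and return its `<ModelName>Model` class."""
    module = importlib.import_module(f"{package}.{model_name}_model")
    target = (model_name.replace("_", "") + "model").lower()
    for name, obj in vars(module).items():
        # Match class names case-insensitively, as find_model_using_name() does.
        if isinstance(obj, type) and name.lower() == target and issubclass(obj, base_class):
            return obj
    raise ImportError(
        f"{package}.{model_name}_model must define a {base_class.__name__} "
        f"subclass named like {target!r} (case-insensitive)"
    )
```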
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/prior_transformer.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/prior_transformer.py
deleted file mode 100644
index 9f3c61dd7561742114947e3419c19fec8c2a824f..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/prior_transformer.py
+++ /dev/null
@@ -1,364 +0,0 @@
-from dataclasses import dataclass
-from typing import Dict, Optional, Union
-
-import torch
-import torch.nn.functional as F
-from torch import nn
-
-from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import BaseOutput
-from .attention import BasicTransformerBlock
-from .attention_processor import AttentionProcessor, AttnProcessor
-from .embeddings import TimestepEmbedding, Timesteps
-from .modeling_utils import ModelMixin
-
-
-@dataclass
-class PriorTransformerOutput(BaseOutput):
- """
- The output of [`PriorTransformer`].
-
- Args:
- predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
- The predicted CLIP image embedding conditioned on the CLIP text embedding input.
- """
-
- predicted_image_embedding: torch.FloatTensor
-
-
-class PriorTransformer(ModelMixin, ConfigMixin):
- """
- A Prior Transformer model.
-
- Parameters:
- num_attention_heads (`int`, *optional*, defaults to 32): The number of heads to use for multi-head attention.
- attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.
- num_layers (`int`, *optional*, defaults to 20): The number of layers of Transformer blocks to use.
- embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `hidden_states`
- num_embeddings (`int`, *optional*, defaults to 77):
- The number of embeddings of the model input `hidden_states`
- additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the
- projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings +
- additional_embeddings`.
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
- time_embed_act_fn (`str`, *optional*, defaults to 'silu'):
- The activation function to use to create timestep embeddings.
- norm_in_type (`str`, *optional*, defaults to None): The normalization layer to apply on hidden states before
- passing to Transformer blocks. Set it to `None` if normalization is not needed.
- embedding_proj_norm_type (`str`, *optional*, defaults to None):
- The normalization layer to apply on the input `proj_embedding`. Set it to `None` if normalization is not
- needed.
- encoder_hid_proj_type (`str`, *optional*, defaults to `linear`):
- The projection layer to apply on the input `encoder_hidden_states`. Set it to `None` if
- `encoder_hidden_states` is `None`.
- added_emb_type (`str`, *optional*, defaults to `prd`): Additional embeddings to condition the model.
-            Choose from `prd` or `None`. If `prd` is chosen, a learned token indicating the (quantized) dot
-            product between the text embedding and image embedding is added to the sequence, as proposed in the
-            unCLIP paper (https://arxiv.org/abs/2204.06125). If it is `None`, no additional embedding is added.
-        time_embed_dim (`int`, *optional*, defaults to None): The dimension of timestep embeddings.
- If None, will be set to `num_attention_heads * attention_head_dim`
-        embedding_proj_dim (`int`, *optional*, defaults to None):
-            The dimension of `proj_embedding`. If None, will be set to `embedding_dim`.
-        clip_embed_dim (`int`, *optional*, defaults to None):
- The dimension of the output. If None, will be set to `embedding_dim`.
- """
-
- @register_to_config
- def __init__(
- self,
- num_attention_heads: int = 32,
- attention_head_dim: int = 64,
- num_layers: int = 20,
- embedding_dim: int = 768,
- num_embeddings=77,
- additional_embeddings=4,
- dropout: float = 0.0,
- time_embed_act_fn: str = "silu",
- norm_in_type: Optional[str] = None, # layer
- embedding_proj_norm_type: Optional[str] = None, # layer
- encoder_hid_proj_type: Optional[str] = "linear", # linear
- added_emb_type: Optional[str] = "prd", # prd
- time_embed_dim: Optional[int] = None,
- embedding_proj_dim: Optional[int] = None,
- clip_embed_dim: Optional[int] = None,
- ):
- super().__init__()
- self.num_attention_heads = num_attention_heads
- self.attention_head_dim = attention_head_dim
- inner_dim = num_attention_heads * attention_head_dim
- self.additional_embeddings = additional_embeddings
-
- time_embed_dim = time_embed_dim or inner_dim
- embedding_proj_dim = embedding_proj_dim or embedding_dim
- clip_embed_dim = clip_embed_dim or embedding_dim
-
- self.time_proj = Timesteps(inner_dim, True, 0)
- self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
-
- self.proj_in = nn.Linear(embedding_dim, inner_dim)
-
- if embedding_proj_norm_type is None:
- self.embedding_proj_norm = None
- elif embedding_proj_norm_type == "layer":
- self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
- else:
- raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
-
- self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
-
- if encoder_hid_proj_type is None:
- self.encoder_hidden_states_proj = None
- elif encoder_hid_proj_type == "linear":
- self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
- else:
- raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
-
- self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
-
- if added_emb_type == "prd":
- self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
- elif added_emb_type is None:
- self.prd_embedding = None
- else:
- raise ValueError(
- f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
- )
-
- self.transformer_blocks = nn.ModuleList(
- [
- BasicTransformerBlock(
- inner_dim,
- num_attention_heads,
- attention_head_dim,
- dropout=dropout,
- activation_fn="gelu",
- attention_bias=True,
- )
- for d in range(num_layers)
- ]
- )
-
- if norm_in_type == "layer":
- self.norm_in = nn.LayerNorm(inner_dim)
- elif norm_in_type is None:
- self.norm_in = None
- else:
- raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")
-
- self.norm_out = nn.LayerNorm(inner_dim)
-
- self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
-
- causal_attention_mask = torch.full(
- [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
- )
- causal_attention_mask.triu_(1)
- causal_attention_mask = causal_attention_mask[None, ...]
- self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
-
- self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
- self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
-
- @property
- # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
- def attn_processors(self) -> Dict[str, AttentionProcessor]:
- r"""
- Returns:
-            `dict` of attention processors: A dictionary containing all attention processors used in the model,
-            indexed by their weight names.
- """
- # set recursively
- processors = {}
-
- def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
- if hasattr(module, "set_processor"):
- processors[f"{name}.processor"] = module.processor
-
- for sub_name, child in module.named_children():
- fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
- return processors
-
- for name, module in self.named_children():
- fn_recursive_add_processors(name, module, processors)
-
- return processors
-
- # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
- def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
- r"""
- Sets the attention processor to use to compute attention.
-
- Parameters:
- processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
- The instantiated processor class or a dictionary of processor classes that will be set as the processor
- for **all** `Attention` layers.
-
- If `processor` is a dict, the key needs to define the path to the corresponding cross attention
- processor. This is strongly recommended when setting trainable attention processors.
-
- """
- count = len(self.attn_processors.keys())
-
- if isinstance(processor, dict) and len(processor) != count:
- raise ValueError(
- f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
- f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
- )
-
- def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
- if hasattr(module, "set_processor"):
- if not isinstance(processor, dict):
- module.set_processor(processor)
- else:
- module.set_processor(processor.pop(f"{name}.processor"))
-
- for sub_name, child in module.named_children():
- fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
-
- for name, module in self.named_children():
- fn_recursive_attn_processor(name, module, processor)
-
- # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
- def set_default_attn_processor(self):
- """
- Disables custom attention processors and sets the default attention implementation.
- """
- self.set_attn_processor(AttnProcessor())
-
- def forward(
- self,
- hidden_states,
- timestep: Union[torch.Tensor, float, int],
- proj_embedding: torch.FloatTensor,
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
- attention_mask: Optional[torch.BoolTensor] = None,
- return_dict: bool = True,
- ):
- """
- The [`PriorTransformer`] forward method.
-
- Args:
- hidden_states (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
- The currently predicted image embeddings.
- timestep (`torch.LongTensor`):
- Current denoising step.
- proj_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
- Projected embedding vector the denoising process is conditioned on.
- encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_embeddings, embedding_dim)`):
- Hidden states of the text embeddings the denoising process is conditioned on.
- attention_mask (`torch.BoolTensor` of shape `(batch_size, num_embeddings)`):
- Text mask for the text embeddings.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~models.prior_transformer.PriorTransformerOutput`] instead of a plain
- tuple.
-
- Returns:
- [`~models.prior_transformer.PriorTransformerOutput`] or `tuple`:
- If return_dict is True, a [`~models.prior_transformer.PriorTransformerOutput`] is returned, otherwise a
- tuple is returned where the first element is the sample tensor.
- """
- batch_size = hidden_states.shape[0]
-
- timesteps = timestep
- if not torch.is_tensor(timesteps):
- timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
- elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
- timesteps = timesteps[None].to(hidden_states.device)
-
- # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
- timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
-
- timesteps_projected = self.time_proj(timesteps)
-
- # timesteps does not contain any weights and will always return f32 tensors
- # but time_embedding might be fp16, so we need to cast here.
- timesteps_projected = timesteps_projected.to(dtype=self.dtype)
- time_embeddings = self.time_embedding(timesteps_projected)
-
- if self.embedding_proj_norm is not None:
- proj_embedding = self.embedding_proj_norm(proj_embedding)
-
- proj_embeddings = self.embedding_proj(proj_embedding)
- if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
- encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
- elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
- raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
-
- hidden_states = self.proj_in(hidden_states)
-
- positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
-
- additional_embeds = []
- additional_embeddings_len = 0
-
- if encoder_hidden_states is not None:
- additional_embeds.append(encoder_hidden_states)
- additional_embeddings_len += encoder_hidden_states.shape[1]
-
- if len(proj_embeddings.shape) == 2:
- proj_embeddings = proj_embeddings[:, None, :]
-
- if len(hidden_states.shape) == 2:
- hidden_states = hidden_states[:, None, :]
-
- additional_embeds = additional_embeds + [
- proj_embeddings,
- time_embeddings[:, None, :],
- hidden_states,
- ]
-
- if self.prd_embedding is not None:
- prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
- additional_embeds.append(prd_embedding)
-
- hidden_states = torch.cat(
- additional_embeds,
- dim=1,
- )
-
-        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
- additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
- if positional_embeddings.shape[1] < hidden_states.shape[1]:
- positional_embeddings = F.pad(
- positional_embeddings,
- (
- 0,
- 0,
- additional_embeddings_len,
- self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
- ),
- value=0.0,
- )
-
- hidden_states = hidden_states + positional_embeddings
-
- if attention_mask is not None:
- attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
- attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
- attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
- attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
-
- if self.norm_in is not None:
- hidden_states = self.norm_in(hidden_states)
-
- for block in self.transformer_blocks:
- hidden_states = block(hidden_states, attention_mask=attention_mask)
-
- hidden_states = self.norm_out(hidden_states)
-
- if self.prd_embedding is not None:
- hidden_states = hidden_states[:, -1]
- else:
- hidden_states = hidden_states[:, additional_embeddings_len:]
-
- predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
-
- if not return_dict:
- return (predicted_image_embedding,)
-
- return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
-
- def post_process_latents(self, prior_latents):
- prior_latents = (prior_latents * self.clip_std) + self.clip_mean
- return prior_latents
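The forward() docstring above fully determines the tensor shapes, so a tiny smoke test is enough to see the interface. The sketch below assumes the installed `diffusers` package exposes the same `PriorTransformer` class as this vendored copy; the miniature config values are arbitrary, chosen only to keep the example fast.

```python
import torch
from diffusers.models import PriorTransformer

# Miniature configuration: inner_dim = 2 heads * 8 channels = 16 = embedding_dim.
model = PriorTransformer(
    num_attention_heads=2,
    attention_head_dim=8,
    num_layers=2,
    embedding_dim=16,
    num_embeddings=4,
    additional_embeddings=4,
)

batch = 2
hidden_states = torch.randn(batch, 16)             # current image-embedding estimate
proj_embedding = torch.randn(batch, 16)            # conditioning embedding
encoder_hidden_states = torch.randn(batch, 4, 16)  # text token embeddings
timestep = torch.tensor([10] * batch)

out = model(hidden_states, timestep, proj_embedding, encoder_hidden_states)
print(out.predicted_image_embedding.shape)  # torch.Size([2, 16])
```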
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ddim.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ddim.py
deleted file mode 100644
index 156b02b2208e253ad51921eabb244af1adb2da61..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ddim.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import torch
-
-from diffusers import DDIMScheduler
-
-from .test_schedulers import SchedulerCommonTest
-
-
-class DDIMSchedulerTest(SchedulerCommonTest):
- scheduler_classes = (DDIMScheduler,)
- forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
-
- def get_scheduler_config(self, **kwargs):
- config = {
- "num_train_timesteps": 1000,
- "beta_start": 0.0001,
- "beta_end": 0.02,
- "beta_schedule": "linear",
- "clip_sample": True,
- }
-
- config.update(**kwargs)
- return config
-
- def full_loop(self, **config):
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config(**config)
- scheduler = scheduler_class(**scheduler_config)
-
- num_inference_steps, eta = 10, 0.0
-
- model = self.dummy_model()
- sample = self.dummy_sample_deter
-
- scheduler.set_timesteps(num_inference_steps)
-
- for t in scheduler.timesteps:
- residual = model(sample, t)
- sample = scheduler.step(residual, t, sample, eta).prev_sample
-
- return sample
-
- def test_timesteps(self):
- for timesteps in [100, 500, 1000]:
- self.check_over_configs(num_train_timesteps=timesteps)
-
- def test_steps_offset(self):
- for steps_offset in [0, 1]:
- self.check_over_configs(steps_offset=steps_offset)
-
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config(steps_offset=1)
- scheduler = scheduler_class(**scheduler_config)
- scheduler.set_timesteps(5)
- assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
-
- def test_betas(self):
- for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
- self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
-
- def test_schedules(self):
- for schedule in ["linear", "squaredcos_cap_v2"]:
- self.check_over_configs(beta_schedule=schedule)
-
- def test_prediction_type(self):
- for prediction_type in ["epsilon", "v_prediction"]:
- self.check_over_configs(prediction_type=prediction_type)
-
- def test_clip_sample(self):
- for clip_sample in [True, False]:
- self.check_over_configs(clip_sample=clip_sample)
-
- def test_timestep_spacing(self):
- for timestep_spacing in ["trailing", "leading"]:
- self.check_over_configs(timestep_spacing=timestep_spacing)
-
- def test_rescale_betas_zero_snr(self):
- for rescale_betas_zero_snr in [True, False]:
- self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
-
- def test_thresholding(self):
- self.check_over_configs(thresholding=False)
- for threshold in [0.5, 1.0, 2.0]:
- for prediction_type in ["epsilon", "v_prediction"]:
- self.check_over_configs(
- thresholding=True,
- prediction_type=prediction_type,
- sample_max_value=threshold,
- )
-
- def test_time_indices(self):
- for t in [1, 10, 49]:
- self.check_over_forward(time_step=t)
-
- def test_inference_steps(self):
- for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
- self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
-
- def test_eta(self):
- for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
- self.check_over_forward(time_step=t, eta=eta)
-
- def test_variance(self):
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config()
- scheduler = scheduler_class(**scheduler_config)
-
- assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
- assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
- assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
- assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
- assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
- assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
-
- def test_full_loop_no_noise(self):
- sample = self.full_loop()
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 172.0067) < 1e-2
- assert abs(result_mean.item() - 0.223967) < 1e-3
-
- def test_full_loop_with_v_prediction(self):
- sample = self.full_loop(prediction_type="v_prediction")
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 52.5302) < 1e-2
- assert abs(result_mean.item() - 0.0684) < 1e-3
-
- def test_full_loop_with_set_alpha_to_one(self):
- # We specify different beta, so that the first alpha is 0.99
- sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 149.8295) < 1e-2
- assert abs(result_mean.item() - 0.1951) < 1e-3
-
- def test_full_loop_with_no_set_alpha_to_one(self):
- # We specify different beta, so that the first alpha is 0.99
- sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 149.0784) < 1e-2
- assert abs(result_mean.item() - 0.1941) < 1e-3
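The `full_loop` helper in the deleted test is the standard DDIM sampling loop. A self-contained version looks like this, with a zero predictor standing in for the dummy model (an assumption for illustration only):

```python
import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(
    num_train_timesteps=1000,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    clip_sample=True,
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)  # stand-in for dummy_sample_deter
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample

print(sample.shape)  # torch.Size([1, 3, 8, 8])
```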
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/README.md b/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/README.md
deleted file mode 100644
index 73714122b9a89bdef5bf95e62dc804b81d6e1c10..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Disentangled Non-Local Neural Networks
-
-## Introduction
-
-
-
-This example is to reproduce ["Disentangled Non-Local Neural Networks"](https://arxiv.org/abs/2006.06668) for semantic segmentation. It is still in progress.
-
-## Citation
-
-```latex
-@misc{yin2020disentangled,
- title={Disentangled Non-Local Neural Networks},
- author={Minghao Yin and Zhuliang Yao and Yue Cao and Xiu Li and Zheng Zhang and Stephen Lin and Han Hu},
- year={2020},
- booktitle={ECCV}
-}
-```
-
-## Results and models (in progress)
-
-### Cityscapes
-
-| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
-| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| dnl | R-50-D8 | 512x1024 | 40000 | 7.3 | 2.56 | 78.61 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes_20200904_233629-53d4ea93.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes-20200904_233629.log.json) |
-| dnl | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.96 | 78.31 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes_20200904_233629-9928ffef.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes-20200904_233629.log.json) |
-| dnl | R-50-D8 | 769x769 | 40000 | 9.2 | 1.50 | 78.44 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes_20200820_232206-0f283785.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes-20200820_232206.log.json) |
-| dnl | R-101-D8 | 769x769 | 40000 | 12.6 | 1.02 | 76.39 | 77.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes_20200820_171256-76c596df.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes-20200820_171256.log.json) |
-| dnl | R-50-D8 | 512x1024 | 80000 | - | - | 79.33 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes_20200904_233629-58b2f778.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes-20200904_233629.log.json) |
-| dnl | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes_20200904_233629-758e2dd4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes-20200904_233629.log.json) |
-| dnl | R-50-D8 | 769x769 | 80000 | - | - | 79.36 | 80.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes_20200820_011925-366bc4c7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes-20200820_011925.log.json) |
-| dnl | R-101-D8 | 769x769 | 80000 | - | - | 79.41 | 80.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes_20200821_051111-95ff84ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes-20200821_051111.log.json) |
-
-### ADE20K
-
-| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
-| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| DNL | R-50-D8 | 512x512 | 80000 | 8.8 | 20.66 | 41.76 | 42.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k_20200826_183354-1cf6e0c1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k-20200826_183354.log.json) |
-| DNL | R-101-D8 | 512x512 | 80000 | 12.8 | 12.54 | 43.76 | 44.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k_20200826_183354-d820d6ea.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k-20200826_183354.log.json) |
-| DNL | R-50-D8 | 512x512 | 160000 | - | - | 41.87 | 43.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k_20200826_183350-37837798.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k-20200826_183350.log.json) |
-| DNL | R-101-D8 | 512x512 | 160000 | - | - | 44.25 | 45.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k_20200826_183350-ed522c61.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k-20200826_183350.log.json) |
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/utils/flops_counter.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/utils/flops_counter.py
deleted file mode 100644
index d10af5feca7f4b8c0ba359b7b1c826f754e048be..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/utils/flops_counter.py
+++ /dev/null
@@ -1,599 +0,0 @@
-# Modified from flops-counter.pytorch by Vladislav Sovrasov
-# original repo: https://github.com/sovrasov/flops-counter.pytorch
-
-# MIT License
-
-# Copyright (c) 2018 Vladislav Sovrasov
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-import sys
-from functools import partial
-
-import numpy as np
-import torch
-import torch.nn as nn
-
-import annotator.uniformer.mmcv as mmcv
-
-
-def get_model_complexity_info(model,
- input_shape,
- print_per_layer_stat=True,
- as_strings=True,
- input_constructor=None,
- flush=False,
- ost=sys.stdout):
- """Get complexity information of a model.
-
- This method can calculate FLOPs and parameter counts of a model with
- corresponding input shape. It can also print complexity information for
- each layer in a model.
-
- Supported layers are listed as below:
- - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
- - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
- ``nn.ReLU6``.
- - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
- ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
- ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
- ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
- ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
- - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
- ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``,
- ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``.
- - Linear: ``nn.Linear``.
- - Deconvolution: ``nn.ConvTranspose2d``.
- - Upsample: ``nn.Upsample``.
-
- Args:
- model (nn.Module): The model for complexity calculation.
- input_shape (tuple): Input shape used for calculation.
- print_per_layer_stat (bool): Whether to print complexity information
- for each layer in a model. Default: True.
- as_strings (bool): Output FLOPs and params counts in a string form.
- Default: True.
-        input_constructor (None | callable): If specified, a callable that
-            generates the model input. Otherwise, a random tensor with the given
-            input shape will be used to calculate FLOPs. Default: None.
- flush (bool): same as that in :func:`print`. Default: False.
- ost (stream): same as ``file`` param in :func:`print`.
- Default: sys.stdout.
-
- Returns:
- tuple[float | str]: If ``as_strings`` is set to True, it will return
-            FLOPs and parameter counts in a string format. Otherwise, it will
-            return them as floats.
- """
- assert type(input_shape) is tuple
- assert len(input_shape) >= 1
- assert isinstance(model, nn.Module)
- flops_model = add_flops_counting_methods(model)
- flops_model.eval()
- flops_model.start_flops_count()
- if input_constructor:
- input = input_constructor(input_shape)
- _ = flops_model(**input)
- else:
- try:
- batch = torch.ones(()).new_empty(
- (1, *input_shape),
- dtype=next(flops_model.parameters()).dtype,
- device=next(flops_model.parameters()).device)
- except StopIteration:
- # Avoid StopIteration for models which have no parameters,
- # like `nn.Relu()`, `nn.AvgPool2d`, etc.
- batch = torch.ones(()).new_empty((1, *input_shape))
-
- _ = flops_model(batch)
-
- flops_count, params_count = flops_model.compute_average_flops_cost()
- if print_per_layer_stat:
- print_model_with_flops(
- flops_model, flops_count, params_count, ost=ost, flush=flush)
- flops_model.stop_flops_count()
-
- if as_strings:
- return flops_to_string(flops_count), params_to_string(params_count)
-
- return flops_count, params_count
-
-
-def flops_to_string(flops, units='GFLOPs', precision=2):
- """Convert FLOPs number into a string.
-
-    Note that here we count a multiply-add as one FLOP.
-
- Args:
- flops (float): FLOPs number to be converted.
- units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',
- 'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically
- choose the most suitable unit for FLOPs. Default: 'GFLOPs'.
- precision (int): Digit number after the decimal point. Default: 2.
-
- Returns:
- str: The converted FLOPs number with units.
-
- Examples:
- >>> flops_to_string(1e9)
- '1.0 GFLOPs'
- >>> flops_to_string(2e5, 'MFLOPs')
- '0.2 MFLOPs'
- >>> flops_to_string(3e-9, None)
- '3e-09 FLOPs'
- """
- if units is None:
- if flops // 10**9 > 0:
- return str(round(flops / 10.**9, precision)) + ' GFLOPs'
- elif flops // 10**6 > 0:
- return str(round(flops / 10.**6, precision)) + ' MFLOPs'
- elif flops // 10**3 > 0:
- return str(round(flops / 10.**3, precision)) + ' KFLOPs'
- else:
- return str(flops) + ' FLOPs'
- else:
- if units == 'GFLOPs':
- return str(round(flops / 10.**9, precision)) + ' ' + units
- elif units == 'MFLOPs':
- return str(round(flops / 10.**6, precision)) + ' ' + units
- elif units == 'KFLOPs':
- return str(round(flops / 10.**3, precision)) + ' ' + units
- else:
- return str(flops) + ' FLOPs'
-
-
-def params_to_string(num_params, units=None, precision=2):
- """Convert parameter number into a string.
-
- Args:
- num_params (float): Parameter number to be converted.
- units (str | None): Converted FLOPs units. Options are None, 'M',
- 'K' and ''. If set to None, it will automatically choose the most
- suitable unit for Parameter number. Default: None.
- precision (int): Digit number after the decimal point. Default: 2.
-
- Returns:
- str: The converted parameter number with units.
-
- Examples:
- >>> params_to_string(1e9)
- '1000.0 M'
- >>> params_to_string(2e5)
- '200.0 k'
- >>> params_to_string(3e-9)
- '3e-09'
- """
- if units is None:
- if num_params // 10**6 > 0:
- return str(round(num_params / 10**6, precision)) + ' M'
- elif num_params // 10**3:
- return str(round(num_params / 10**3, precision)) + ' k'
- else:
- return str(num_params)
- else:
- if units == 'M':
- return str(round(num_params / 10.**6, precision)) + ' ' + units
- elif units == 'K':
- return str(round(num_params / 10.**3, precision)) + ' ' + units
- else:
- return str(num_params)
-
-
-def print_model_with_flops(model,
- total_flops,
- total_params,
- units='GFLOPs',
- precision=3,
- ost=sys.stdout,
- flush=False):
- """Print a model with FLOPs for each layer.
-
- Args:
- model (nn.Module): The model to be printed.
- total_flops (float): Total FLOPs of the model.
- total_params (float): Total parameter counts of the model.
- units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
- precision (int): Digit number after the decimal point. Default: 3.
- ost (stream): same as `file` param in :func:`print`.
- Default: sys.stdout.
- flush (bool): same as that in :func:`print`. Default: False.
-
- Example:
- >>> class ExampleModel(nn.Module):
-
- >>> def __init__(self):
- >>> super().__init__()
- >>> self.conv1 = nn.Conv2d(3, 8, 3)
- >>> self.conv2 = nn.Conv2d(8, 256, 3)
- >>> self.conv3 = nn.Conv2d(256, 8, 3)
- >>> self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
- >>> self.flatten = nn.Flatten()
- >>> self.fc = nn.Linear(8, 1)
-
- >>> def forward(self, x):
- >>> x = self.conv1(x)
- >>> x = self.conv2(x)
- >>> x = self.conv3(x)
- >>> x = self.avg_pool(x)
- >>> x = self.flatten(x)
- >>> x = self.fc(x)
- >>> return x
-
- >>> model = ExampleModel()
- >>> x = (3, 16, 16)
- to print the complexity information state for each layer, you can use
- >>> get_model_complexity_info(model, x)
- or directly use
- >>> print_model_with_flops(model, 4579784.0, 37361)
- ExampleModel(
- 0.037 M, 100.000% Params, 0.005 GFLOPs, 100.000% FLOPs,
- (conv1): Conv2d(0.0 M, 0.600% Params, 0.0 GFLOPs, 0.959% FLOPs, 3, 8, kernel_size=(3, 3), stride=(1, 1)) # noqa: E501
- (conv2): Conv2d(0.019 M, 50.020% Params, 0.003 GFLOPs, 58.760% FLOPs, 8, 256, kernel_size=(3, 3), stride=(1, 1))
- (conv3): Conv2d(0.018 M, 49.356% Params, 0.002 GFLOPs, 40.264% FLOPs, 256, 8, kernel_size=(3, 3), stride=(1, 1))
- (avg_pool): AdaptiveAvgPool2d(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.017% FLOPs, output_size=(1, 1))
- (flatten): Flatten(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.000% FLOPs, )
- (fc): Linear(0.0 M, 0.024% Params, 0.0 GFLOPs, 0.000% FLOPs, in_features=8, out_features=1, bias=True)
- )
- """
-
- def accumulate_params(self):
- if is_supported_instance(self):
- return self.__params__
- else:
- sum = 0
- for m in self.children():
- sum += m.accumulate_params()
- return sum
-
- def accumulate_flops(self):
- if is_supported_instance(self):
- return self.__flops__ / model.__batch_counter__
- else:
- sum = 0
- for m in self.children():
- sum += m.accumulate_flops()
- return sum
-
- def flops_repr(self):
- accumulated_num_params = self.accumulate_params()
- accumulated_flops_cost = self.accumulate_flops()
- return ', '.join([
- params_to_string(
- accumulated_num_params, units='M', precision=precision),
- '{:.3%} Params'.format(accumulated_num_params / total_params),
- flops_to_string(
- accumulated_flops_cost, units=units, precision=precision),
- '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops),
- self.original_extra_repr()
- ])
-
- def add_extra_repr(m):
- m.accumulate_flops = accumulate_flops.__get__(m)
- m.accumulate_params = accumulate_params.__get__(m)
- flops_extra_repr = flops_repr.__get__(m)
- if m.extra_repr != flops_extra_repr:
- m.original_extra_repr = m.extra_repr
- m.extra_repr = flops_extra_repr
- assert m.extra_repr != m.original_extra_repr
-
- def del_extra_repr(m):
- if hasattr(m, 'original_extra_repr'):
- m.extra_repr = m.original_extra_repr
- del m.original_extra_repr
- if hasattr(m, 'accumulate_flops'):
- del m.accumulate_flops
-
- model.apply(add_extra_repr)
- print(model, file=ost, flush=flush)
- model.apply(del_extra_repr)
-
-
-def get_model_parameters_number(model):
- """Calculate parameter number of a model.
-
- Args:
- model (nn.module): The model for parameter number calculation.
-
- Returns:
- float: Parameter number of the model.
- """
- num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
- return num_params
-
-
-def add_flops_counting_methods(net_main_module):
- # adding additional methods to the existing module object,
- # this is done this way so that each function has access to self object
- net_main_module.start_flops_count = start_flops_count.__get__(
- net_main_module)
- net_main_module.stop_flops_count = stop_flops_count.__get__(
- net_main_module)
- net_main_module.reset_flops_count = reset_flops_count.__get__(
- net_main_module)
- net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__( # noqa: E501
- net_main_module)
-
- net_main_module.reset_flops_count()
-
- return net_main_module
-
-
-def compute_average_flops_cost(self):
- """Compute average FLOPs cost.
-
- A method to compute average FLOPs cost, which will be available after
- `add_flops_counting_methods()` is called on a desired net object.
-
- Returns:
- float: Current mean flops consumption per image.
- """
- batches_count = self.__batch_counter__
- flops_sum = 0
- for module in self.modules():
- if is_supported_instance(module):
- flops_sum += module.__flops__
- params_sum = get_model_parameters_number(self)
- return flops_sum / batches_count, params_sum
-
-
-def start_flops_count(self):
- """Activate the computation of mean flops consumption per image.
-
-    A method to activate the computation of mean flops consumption per image,
-    which will be available after ``add_flops_counting_methods()`` is called on
- a desired net object. It should be called before running the network.
- """
- add_batch_counter_hook_function(self)
-
- def add_flops_counter_hook_function(module):
- if is_supported_instance(module):
- if hasattr(module, '__flops_handle__'):
- return
-
- else:
- handle = module.register_forward_hook(
- get_modules_mapping()[type(module)])
-
- module.__flops_handle__ = handle
-
- self.apply(partial(add_flops_counter_hook_function))
-
-
-def stop_flops_count(self):
- """Stop computing the mean flops consumption per image.
-
- A method to stop computing the mean flops consumption per image, which will
- be available after ``add_flops_counting_methods()`` is called on a desired
-    net object. It can be called to pause the computation at any time.
- """
- remove_batch_counter_hook_function(self)
- self.apply(remove_flops_counter_hook_function)
-
-
-def reset_flops_count(self):
- """Reset statistics computed so far.
-
-    A method to reset computed statistics, which will be available after
- `add_flops_counting_methods()` is called on a desired net object.
- """
- add_batch_counter_variables_or_reset(self)
- self.apply(add_flops_counter_variable_or_reset)
-
-
-# ---- Internal functions
-def empty_flops_counter_hook(module, input, output):
- module.__flops__ += 0
-
-
-def upsample_flops_counter_hook(module, input, output):
- output_size = output[0]
- batch_size = output_size.shape[0]
- output_elements_count = batch_size
- for val in output_size.shape[1:]:
- output_elements_count *= val
- module.__flops__ += int(output_elements_count)
-
-
-def relu_flops_counter_hook(module, input, output):
- active_elements_count = output.numel()
- module.__flops__ += int(active_elements_count)
-
-
-def linear_flops_counter_hook(module, input, output):
- input = input[0]
-    # pytorch checks dimensions, so here we don't care much
-    output_last_dim = output.shape[-1]
- module.__flops__ += int(np.prod(input.shape) * output_last_dim)
-
-
-def pool_flops_counter_hook(module, input, output):
- input = input[0]
- module.__flops__ += int(np.prod(input.shape))
-
-
-def norm_flops_counter_hook(module, input, output):
- input = input[0]
-
- batch_flops = np.prod(input.shape)
- if (getattr(module, 'affine', False)
- or getattr(module, 'elementwise_affine', False)):
- batch_flops *= 2
- module.__flops__ += int(batch_flops)
-
-
-def deconv_flops_counter_hook(conv_module, input, output):
- # Can have multiple inputs, getting the first one
- input = input[0]
-
- batch_size = input.shape[0]
- input_height, input_width = input.shape[2:]
-
- kernel_height, kernel_width = conv_module.kernel_size
- in_channels = conv_module.in_channels
- out_channels = conv_module.out_channels
- groups = conv_module.groups
-
- filters_per_channel = out_channels // groups
- conv_per_position_flops = (
- kernel_height * kernel_width * in_channels * filters_per_channel)
-
- active_elements_count = batch_size * input_height * input_width
- overall_conv_flops = conv_per_position_flops * active_elements_count
- bias_flops = 0
- if conv_module.bias is not None:
- output_height, output_width = output.shape[2:]
-        bias_flops = out_channels * batch_size * output_height * output_width
- overall_flops = overall_conv_flops + bias_flops
-
- conv_module.__flops__ += int(overall_flops)
-
-
-def conv_flops_counter_hook(conv_module, input, output):
- # Can have multiple inputs, getting the first one
- input = input[0]
-
- batch_size = input.shape[0]
- output_dims = list(output.shape[2:])
-
- kernel_dims = list(conv_module.kernel_size)
- in_channels = conv_module.in_channels
- out_channels = conv_module.out_channels
- groups = conv_module.groups
-
- filters_per_channel = out_channels // groups
- conv_per_position_flops = int(
- np.prod(kernel_dims)) * in_channels * filters_per_channel
-
- active_elements_count = batch_size * int(np.prod(output_dims))
-
- overall_conv_flops = conv_per_position_flops * active_elements_count
-
- bias_flops = 0
-
- if conv_module.bias is not None:
-
- bias_flops = out_channels * active_elements_count
-
- overall_flops = overall_conv_flops + bias_flops
-
- conv_module.__flops__ += int(overall_flops)
-
-
-def batch_counter_hook(module, input, output):
- batch_size = 1
- if len(input) > 0:
- # Can have multiple inputs, getting the first one
- input = input[0]
- batch_size = len(input)
-    else:
-        print('Warning! No positional inputs found for a module, '
-              'assuming batch size is 1.')
- module.__batch_counter__ += batch_size
-
-
-def add_batch_counter_variables_or_reset(module):
-
- module.__batch_counter__ = 0
-
-
-def add_batch_counter_hook_function(module):
- if hasattr(module, '__batch_counter_handle__'):
- return
-
- handle = module.register_forward_hook(batch_counter_hook)
- module.__batch_counter_handle__ = handle
-
-
-def remove_batch_counter_hook_function(module):
- if hasattr(module, '__batch_counter_handle__'):
- module.__batch_counter_handle__.remove()
- del module.__batch_counter_handle__
-
-
-def add_flops_counter_variable_or_reset(module):
- if is_supported_instance(module):
- if hasattr(module, '__flops__') or hasattr(module, '__params__'):
-            print('Warning: variables __flops__ or __params__ are already '
-                  'defined for the module ' + type(module).__name__ +
-                  '. ptflops can affect your code!')
- module.__flops__ = 0
- module.__params__ = get_model_parameters_number(module)
-
-
-def is_supported_instance(module):
- if type(module) in get_modules_mapping():
- return True
- return False
-
-
-def remove_flops_counter_hook_function(module):
- if is_supported_instance(module):
- if hasattr(module, '__flops_handle__'):
- module.__flops_handle__.remove()
- del module.__flops_handle__
-
-
-def get_modules_mapping():
- return {
- # convolutions
- nn.Conv1d: conv_flops_counter_hook,
- nn.Conv2d: conv_flops_counter_hook,
- mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook,
- nn.Conv3d: conv_flops_counter_hook,
- mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook,
- # activations
- nn.ReLU: relu_flops_counter_hook,
- nn.PReLU: relu_flops_counter_hook,
- nn.ELU: relu_flops_counter_hook,
- nn.LeakyReLU: relu_flops_counter_hook,
- nn.ReLU6: relu_flops_counter_hook,
- # poolings
- nn.MaxPool1d: pool_flops_counter_hook,
- nn.AvgPool1d: pool_flops_counter_hook,
- nn.AvgPool2d: pool_flops_counter_hook,
- nn.MaxPool2d: pool_flops_counter_hook,
- mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook,
- nn.MaxPool3d: pool_flops_counter_hook,
- mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook,
- nn.AvgPool3d: pool_flops_counter_hook,
- nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
- nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
- nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
- nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
- nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
- nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
- # normalizations
- nn.BatchNorm1d: norm_flops_counter_hook,
- nn.BatchNorm2d: norm_flops_counter_hook,
- nn.BatchNorm3d: norm_flops_counter_hook,
- nn.GroupNorm: norm_flops_counter_hook,
- nn.InstanceNorm1d: norm_flops_counter_hook,
- nn.InstanceNorm2d: norm_flops_counter_hook,
- nn.InstanceNorm3d: norm_flops_counter_hook,
- nn.LayerNorm: norm_flops_counter_hook,
- # FC
- nn.Linear: linear_flops_counter_hook,
- mmcv.cnn.bricks.Linear: linear_flops_counter_hook,
- # Upscale
- nn.Upsample: upsample_flops_counter_hook,
- # Deconvolution
- nn.ConvTranspose2d: deconv_flops_counter_hook,
- mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook,
- }
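Since the file above is a vendored copy of mmcv's flops counter, its entry point is `get_model_complexity_info`. A minimal usage sketch, assuming an mmcv version that still ships `mmcv.cnn.get_model_complexity_info` with this interface:

```python
import torch.nn as nn
from mmcv.cnn import get_model_complexity_info

# A toy model built only from layer types listed in get_modules_mapping().
model = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1),
    nn.ReLU(),
    nn.Conv2d(8, 16, 3, padding=1),
)

flops, params = get_model_complexity_info(
    model, (3, 224, 224), as_strings=True, print_per_layer_stat=False)
print(flops, params)  # FLOPs and parameter count as human-readable strings
```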
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/photometric.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/photometric.py
deleted file mode 100644
index 5085d012019c0cbf56f66f421a378278c1a058ae..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/photometric.py
+++ /dev/null
@@ -1,428 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import cv2
-import numpy as np
-
-from ..utils import is_tuple_of
-from .colorspace import bgr2gray, gray2bgr
-
-
-def imnormalize(img, mean, std, to_rgb=True):
- """Normalize an image with mean and std.
-
- Args:
- img (ndarray): Image to be normalized.
- mean (ndarray): The mean to be used for normalize.
- std (ndarray): The std to be used for normalize.
- to_rgb (bool): Whether to convert to rgb.
-
- Returns:
- ndarray: The normalized image.
- """
- img = img.copy().astype(np.float32)
- return imnormalize_(img, mean, std, to_rgb)
-
-
-def imnormalize_(img, mean, std, to_rgb=True):
- """Inplace normalize an image with mean and std.
-
- Args:
- img (ndarray): Image to be normalized.
- mean (ndarray): The mean to be used for normalize.
- std (ndarray): The std to be used for normalize.
- to_rgb (bool): Whether to convert to rgb.
-
- Returns:
- ndarray: The normalized image.
- """
- # cv2 inplace normalization does not accept uint8
- assert img.dtype != np.uint8
- mean = np.float64(mean.reshape(1, -1))
- stdinv = 1 / np.float64(std.reshape(1, -1))
- if to_rgb:
- cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace
- cv2.subtract(img, mean, img) # inplace
- cv2.multiply(img, stdinv, img) # inplace
- return img
-
-
-def imdenormalize(img, mean, std, to_bgr=True):
- assert img.dtype != np.uint8
- mean = mean.reshape(1, -1).astype(np.float64)
- std = std.reshape(1, -1).astype(np.float64)
- img = cv2.multiply(img, std) # make a copy
- cv2.add(img, mean, img) # inplace
- if to_bgr:
- cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace
- return img
-
-
-def iminvert(img):
- """Invert (negate) an image.
-
- Args:
- img (ndarray): Image to be inverted.
-
- Returns:
- ndarray: The inverted image.
- """
- return np.full_like(img, 255) - img
-
-
-def solarize(img, thr=128):
- """Solarize an image (invert all pixel values above a threshold)
-
- Args:
- img (ndarray): Image to be solarized.
- thr (int): Threshold for solarizing (0 - 255).
-
- Returns:
- ndarray: The solarized image.
- """
- img = np.where(img < thr, img, 255 - img)
- return img
-
-
-def posterize(img, bits):
- """Posterize an image (reduce the number of bits for each color channel)
-
- Args:
- img (ndarray): Image to be posterized.
- bits (int): Number of bits (1 to 8) to use for posterizing.
-
- Returns:
- ndarray: The posterized image.
- """
- shift = 8 - bits
- img = np.left_shift(np.right_shift(img, shift), shift)
- return img
-
-
-def adjust_color(img, alpha=1, beta=None, gamma=0):
- r"""It blends the source image and its gray image:
-
- .. math::
- output = img * alpha + gray\_img * beta + gamma
-
- Args:
- img (ndarray): The input source image.
- alpha (int | float): Weight for the source image. Default 1.
- beta (int | float): Weight for the converted gray image.
- If None, it's assigned the value (1 - `alpha`).
- gamma (int | float): Scalar added to each sum.
- Same as :func:`cv2.addWeighted`. Default 0.
-
- Returns:
- ndarray: Colored image which has the same size and dtype as input.
- """
- gray_img = bgr2gray(img)
- gray_img = np.tile(gray_img[..., None], [1, 1, 3])
- if beta is None:
- beta = 1 - alpha
- colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma)
- if not colored_img.dtype == np.uint8:
-        # Note: when the dtype of `img` is not the default `np.uint8`
-        # (e.g. np.float32), the values in `colored_img` returned by cv2
-        # are not guaranteed to lie in [0, 255], so clipping is needed
-        # here.
- colored_img = np.clip(colored_img, 0, 255)
- return colored_img
-
-
-def imequalize(img):
- """Equalize the image histogram.
-
- This function applies a non-linear mapping to the input image,
- in order to create a uniform distribution of grayscale values
- in the output image.
-
- Args:
- img (ndarray): Image to be equalized.
-
- Returns:
- ndarray: The equalized image.
- """
-
- def _scale_channel(im, c):
- """Scale the data in the corresponding channel."""
- im = im[:, :, c]
- # Compute the histogram of the image channel.
- histo = np.histogram(im, 256, (0, 255))[0]
- # For computing the step, filter out the nonzeros.
- nonzero_histo = histo[histo > 0]
- step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255
- if not step:
- lut = np.array(range(256))
- else:
- # Compute the cumulative sum, shifted by step // 2
- # and then normalized by step.
- lut = (np.cumsum(histo) + (step // 2)) // step
- # Shift lut, prepending with 0.
- lut = np.concatenate([[0], lut[:-1]], 0)
- # handle potential integer overflow
- lut[lut > 255] = 255
- # If step is zero, return the original image.
- # Otherwise, index from lut.
- return np.where(np.equal(step, 0), im, lut[im])
-
- # Scales each channel independently and then stacks
- # the result.
- s1 = _scale_channel(img, 0)
- s2 = _scale_channel(img, 1)
- s3 = _scale_channel(img, 2)
- equalized_img = np.stack([s1, s2, s3], axis=-1)
- return equalized_img.astype(img.dtype)
-
-
-def adjust_brightness(img, factor=1.):
- """Adjust image brightness.
-
- This function controls the brightness of an image. An
- enhancement factor of 0.0 gives a black image.
- A factor of 1.0 gives the original image. This function
- blends the source image and the degenerated black image:
-
- .. math::
- output = img * factor + degenerated * (1 - factor)
-
- Args:
- img (ndarray): Image to be brightened.
- factor (float): A value controls the enhancement.
- Factor 1.0 returns the original image, lower
- factors mean less color (brightness, contrast,
- etc), and higher values more. Default 1.
-
- Returns:
- ndarray: The brightened image.
- """
- degenerated = np.zeros_like(img)
-    # Note: manually convert the dtype to np.float32 to match the
-    # results of PIL.ImageEnhance.Brightness as closely as possible.
- # Set beta=1-factor, and gamma=0
- brightened_img = cv2.addWeighted(
- img.astype(np.float32), factor, degenerated.astype(np.float32),
- 1 - factor, 0)
- brightened_img = np.clip(brightened_img, 0, 255)
- return brightened_img.astype(img.dtype)
-
-
-def adjust_contrast(img, factor=1.):
- """Adjust image contrast.
-
- This function controls the contrast of an image. An
- enhancement factor of 0.0 gives a solid grey
- image. A factor of 1.0 gives the original image. It
- blends the source image and the degenerated mean image:
-
- .. math::
- output = img * factor + degenerated * (1 - factor)
-
- Args:
- img (ndarray): Image to be contrasted. BGR order.
- factor (float): Same as :func:`mmcv.adjust_brightness`.
-
- Returns:
- ndarray: The contrasted image.
- """
- gray_img = bgr2gray(img)
- hist = np.histogram(gray_img, 256, (0, 255))[0]
- mean = round(np.sum(gray_img) / np.sum(hist))
- degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype)
- degenerated = gray2bgr(degenerated)
- contrasted_img = cv2.addWeighted(
- img.astype(np.float32), factor, degenerated.astype(np.float32),
- 1 - factor, 0)
- contrasted_img = np.clip(contrasted_img, 0, 255)
- return contrasted_img.astype(img.dtype)
-
-
-def auto_contrast(img, cutoff=0):
- """Auto adjust image contrast.
-
-    This function maximizes (normalizes) image contrast by first removing the cutoff
- percent of the lightest and darkest pixels from the histogram and remapping
- the image so that the darkest pixel becomes black (0), and the lightest
- becomes white (255).
-
- Args:
- img (ndarray): Image to be contrasted. BGR order.
- cutoff (int | float | tuple): The cutoff percent of the lightest and
- darkest pixels to be removed. If given as tuple, it shall be
- (low, high). Otherwise, the single value will be used for both.
- Defaults to 0.
-
- Returns:
- ndarray: The contrasted image.
- """
-
- def _auto_contrast_channel(im, c, cutoff):
- im = im[:, :, c]
- # Compute the histogram of the image channel.
- histo = np.histogram(im, 256, (0, 255))[0]
- # Remove cut-off percent pixels from histo
- histo_sum = np.cumsum(histo)
- cut_low = histo_sum[-1] * cutoff[0] // 100
- cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100
- histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low
- histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0)
-
- # Compute mapping
- low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1]
-        # If all the values have been cut off, return the original img
- if low >= high:
- return im
- scale = 255.0 / (high - low)
- offset = -low * scale
- lut = np.array(range(256))
- lut = lut * scale + offset
- lut = np.clip(lut, 0, 255)
- return lut[im]
-
- if isinstance(cutoff, (int, float)):
- cutoff = (cutoff, cutoff)
- else:
- assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \
- f'float or tuple, but got {type(cutoff)} instead.'
- # Auto adjusts contrast for each channel independently and then stacks
- # the result.
- s1 = _auto_contrast_channel(img, 0, cutoff)
- s2 = _auto_contrast_channel(img, 1, cutoff)
- s3 = _auto_contrast_channel(img, 2, cutoff)
- contrasted_img = np.stack([s1, s2, s3], axis=-1)
- return contrasted_img.astype(img.dtype)
-
-
-def adjust_sharpness(img, factor=1., kernel=None):
- """Adjust image sharpness.
-
- This function controls the sharpness of an image. An
- enhancement factor of 0.0 gives a blurred image. A
- factor of 1.0 gives the original image. And a factor
- of 2.0 gives a sharpened image. It blends the source
- image and the degenerated mean image:
-
- .. math::
- output = img * factor + degenerated * (1 - factor)
-
- Args:
- img (ndarray): Image to be sharpened. BGR order.
- factor (float): Same as :func:`mmcv.adjust_brightness`.
- kernel (np.ndarray, optional): Filter kernel to be applied on the img
- to obtain the degenerated img. Defaults to None.
-
- Note:
-        No sanity check is enforced on a user-supplied kernel, so with an
-        inappropriate kernel ``adjust_sharpness`` may fail to perform the
-        function its name indicates and will instead apply whatever
-        transform the kernel defines.
-
- Returns:
- ndarray: The sharpened image.
- """
-
- if kernel is None:
- # adopted from PIL.ImageFilter.SMOOTH
- kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13
- assert isinstance(kernel, np.ndarray), \
- f'kernel must be of type np.ndarray, but got {type(kernel)} instead.'
- assert kernel.ndim == 2, \
- f'kernel must have a dimension of 2, but got {kernel.ndim} instead.'
-
- degenerated = cv2.filter2D(img, -1, kernel)
- sharpened_img = cv2.addWeighted(
- img.astype(np.float32), factor, degenerated.astype(np.float32),
- 1 - factor, 0)
- sharpened_img = np.clip(sharpened_img, 0, 255)
- return sharpened_img.astype(img.dtype)
-
-
-def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True):
- """AlexNet-style PCA jitter.
-
- This data augmentation is proposed in `ImageNet Classification with Deep
- Convolutional Neural Networks
-    <https://dl.acm.org/doi/pdf/10.1145/3065386>`_.
-
- Args:
-        img (ndarray): Image whose lighting is to be adjusted. BGR order.
-        eigval (ndarray): the eigenvalues of the covariance matrix of pixel
-            values.
-        eigvec (ndarray): the eigenvectors of the covariance matrix of pixel
-            values.
- alphastd (float): The standard deviation for distribution of alpha.
- Defaults to 0.1
- to_rgb (bool): Whether to convert img to rgb.
-
- Returns:
- ndarray: The adjusted image.
- """
- assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \
- f'eigval and eigvec should both be of type np.ndarray, got ' \
- f'{type(eigval)} and {type(eigvec)} instead.'
-
- assert eigval.ndim == 1 and eigvec.ndim == 2
- assert eigvec.shape == (3, eigval.shape[0])
- n_eigval = eigval.shape[0]
- assert isinstance(alphastd, float), 'alphastd should be of type float, ' \
- f'got {type(alphastd)} instead.'
-
- img = img.copy().astype(np.float32)
- if to_rgb:
- cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace
-
- alpha = np.random.normal(0, alphastd, n_eigval)
- alter = eigvec \
- * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \
- * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval))
- alter = np.broadcast_to(alter.sum(axis=1).reshape(1, 1, 3), img.shape)
- img_adjusted = img + alter
- return img_adjusted
-
-
-def lut_transform(img, lut_table):
- """Transform array by look-up table.
-
- The function lut_transform fills the output array with values from the
- look-up table. Indices of the entries are taken from the input array.
-
- Args:
- img (ndarray): Image to be transformed.
- lut_table (ndarray): look-up table of 256 elements; in case of
- multi-channel input array, the table should either have a single
- channel (in this case the same table is used for all channels) or
- the same number of channels as in the input array.
-
- Returns:
- ndarray: The transformed image.
- """
- assert isinstance(img, np.ndarray)
- assert 0 <= np.min(img) and np.max(img) <= 255
- assert isinstance(lut_table, np.ndarray)
- assert lut_table.shape == (256, )
-
- return cv2.LUT(np.array(img, dtype=np.uint8), lut_table)
-
-
-def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)):
- """Use CLAHE method to process the image.
-
- See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
- Graphics Gems, 1994:474-485.` for more information.
-
- Args:
- img (ndarray): Image to be processed.
- clip_limit (float): Threshold for contrast limiting. Default: 40.0.
- tile_grid_size (tuple[int]): Size of grid for histogram equalization.
- Input image will be divided into equally sized rectangular tiles.
- It defines the number of tiles in row and column. Default: (8, 8).
-
- Returns:
- ndarray: The processed image.
- """
- assert isinstance(img, np.ndarray)
- assert img.ndim == 2
- assert isinstance(clip_limit, (float, int))
- assert is_tuple_of(tile_grid_size, int)
- assert len(tile_grid_size) == 2
-
- clahe = cv2.createCLAHE(clip_limit, tile_grid_size)
- return clahe.apply(np.array(img, dtype=np.uint8))
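
For reference, a minimal usage sketch of the photometric helpers above. It assumes an mmcv 1.x installation that re-exports these functions at the package root; the random array is only a stand-in for a real BGR image, and the mean/std values are illustrative ImageNet statistics.

```python
import numpy as np
import mmcv

# Stand-in for a BGR uint8 image; any HxWx3 uint8 array works here.
img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)

# Channel-wise mean/std (illustrative ImageNet statistics).
mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
std = np.array([58.395, 57.12, 57.375], dtype=np.float32)

normalized = mmcv.imnormalize(img, mean, std, to_rgb=True)  # float32 output
brighter = mmcv.adjust_brightness(img, factor=1.5)          # keeps uint8, clipped to [0, 255]
flatter = mmcv.adjust_contrast(img, factor=0.5)
print(normalized.dtype, brighter.dtype, flatter.dtype)
```
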
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/dataset_wrappers.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/dataset_wrappers.py
deleted file mode 100644
index d6a5e957ec3b44465432617cf6e8f0b86a8a5efa..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/dataset_wrappers.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
-
-from .builder import DATASETS
-
-
-@DATASETS.register_module()
-class ConcatDataset(_ConcatDataset):
- """A wrapper of concatenated dataset.
-
-    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but it also
-    concatenates the group flag for image aspect ratio.
-
- Args:
- datasets (list[:obj:`Dataset`]): A list of datasets.
- """
-
- def __init__(self, datasets):
- super(ConcatDataset, self).__init__(datasets)
- self.CLASSES = datasets[0].CLASSES
- self.PALETTE = datasets[0].PALETTE
-
-
-@DATASETS.register_module()
-class RepeatDataset(object):
- """A wrapper of repeated dataset.
-
-    The length of the repeated dataset will be `times` times that of the original
- dataset. This is useful when the data loading time is long but the dataset
- is small. Using RepeatDataset can reduce the data loading time between
- epochs.
-
- Args:
- dataset (:obj:`Dataset`): The dataset to be repeated.
- times (int): Repeat times.
- """
-
- def __init__(self, dataset, times):
- self.dataset = dataset
- self.times = times
- self.CLASSES = dataset.CLASSES
- self.PALETTE = dataset.PALETTE
- self._ori_len = len(self.dataset)
-
- def __getitem__(self, idx):
- """Get item from original dataset."""
- return self.dataset[idx % self._ori_len]
-
- def __len__(self):
- """The length is multiplied by ``times``"""
- return self.times * self._ori_len
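
A quick illustration of the wrapping behavior, assuming `RepeatDataset` from the file above is importable; `DummyDataset` is a hypothetical stand-in that only provides the attributes the wrapper touches.

```python
class DummyDataset:
    """Hypothetical minimal dataset: just CLASSES, PALETTE, __len__ and __getitem__."""

    CLASSES = ('road', 'car')
    PALETTE = [[128, 64, 128], [0, 0, 142]]

    def __len__(self):
        return 4

    def __getitem__(self, idx):
        return idx


repeated = RepeatDataset(DummyDataset(), times=3)
assert len(repeated) == 12        # 3 x 4 samples
assert repeated[7] == 7 % 4       # indices wrap around the original dataset
```
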
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/markup.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/markup.py
deleted file mode 100644
index fd80d8c1129722b84771bd6a0f6ccfd57f5cf78e..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/markup.py
+++ /dev/null
@@ -1,246 +0,0 @@
-import re
-from ast import literal_eval
-from operator import attrgetter
-from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union
-
-from ._emoji_replace import _emoji_replace
-from .emoji import EmojiVariant
-from .errors import MarkupError
-from .style import Style
-from .text import Span, Text
-
-RE_TAGS = re.compile(
- r"""((\\*)\[([a-z#/@][^[]*?)])""",
- re.VERBOSE,
-)
-
-RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$")
-
-
-class Tag(NamedTuple):
- """A tag in console markup."""
-
- name: str
- """The tag name. e.g. 'bold'."""
- parameters: Optional[str]
- """Any additional parameters after the name."""
-
- def __str__(self) -> str:
- return (
- self.name if self.parameters is None else f"{self.name} {self.parameters}"
- )
-
- @property
- def markup(self) -> str:
- """Get the string representation of this tag."""
- return (
- f"[{self.name}]"
- if self.parameters is None
- else f"[{self.name}={self.parameters}]"
- )
-
-
-_ReStringMatch = Match[str] # regex match object
-_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
-_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
-
-
-def escape(
- markup: str,
- _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub,
-) -> str:
- """Escapes text so that it won't be interpreted as markup.
-
- Args:
-        markup (str): Content to be inserted into markup.
-
- Returns:
- str: Markup with square brackets escaped.
- """
-
- def escape_backslashes(match: Match[str]) -> str:
- """Called by re.sub replace matches."""
- backslashes, text = match.groups()
- return f"{backslashes}{backslashes}\\{text}"
-
- markup = _escape(escape_backslashes, markup)
- return markup
-
-
-def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
- """Parse markup in to an iterable of tuples of (position, text, tag).
-
- Args:
- markup (str): A string containing console markup
-
- """
- position = 0
- _divmod = divmod
- _Tag = Tag
- for match in RE_TAGS.finditer(markup):
- full_text, escapes, tag_text = match.groups()
- start, end = match.span()
- if start > position:
- yield start, markup[position:start], None
- if escapes:
- backslashes, escaped = _divmod(len(escapes), 2)
- if backslashes:
- # Literal backslashes
- yield start, "\\" * backslashes, None
- start += backslashes * 2
- if escaped:
- # Escape of tag
- yield start, full_text[len(escapes) :], None
- position = end
- continue
- text, equals, parameters = tag_text.partition("=")
- yield start, None, _Tag(text, parameters if equals else None)
- position = end
- if position < len(markup):
- yield position, markup[position:], None
-
-
-def render(
- markup: str,
- style: Union[str, Style] = "",
- emoji: bool = True,
- emoji_variant: Optional[EmojiVariant] = None,
-) -> Text:
- """Render console markup in to a Text instance.
-
- Args:
- markup (str): A string containing console markup.
- emoji (bool, optional): Also render emoji code. Defaults to True.
-
- Raises:
- MarkupError: If there is a syntax error in the markup.
-
- Returns:
-        Text: A Text instance.
- """
- emoji_replace = _emoji_replace
- if "[" not in markup:
- return Text(
- emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
- style=style,
- )
- text = Text(style=style)
- append = text.append
- normalize = Style.normalize
-
- style_stack: List[Tuple[int, Tag]] = []
- pop = style_stack.pop
-
- spans: List[Span] = []
- append_span = spans.append
-
- _Span = Span
- _Tag = Tag
-
- def pop_style(style_name: str) -> Tuple[int, Tag]:
- """Pop tag matching given style name."""
- for index, (_, tag) in enumerate(reversed(style_stack), 1):
- if tag.name == style_name:
- return pop(-index)
- raise KeyError(style_name)
-
- for position, plain_text, tag in _parse(markup):
- if plain_text is not None:
- # Handle open brace escapes, where the brace is not part of a tag.
- plain_text = plain_text.replace("\\[", "[")
- append(emoji_replace(plain_text) if emoji else plain_text)
- elif tag is not None:
- if tag.name.startswith("/"): # Closing tag
- style_name = tag.name[1:].strip()
-
- if style_name: # explicit close
- style_name = normalize(style_name)
- try:
- start, open_tag = pop_style(style_name)
- except KeyError:
- raise MarkupError(
- f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
- ) from None
- else: # implicit close
- try:
- start, open_tag = pop()
- except IndexError:
- raise MarkupError(
- f"closing tag '[/]' at position {position} has nothing to close"
- ) from None
-
- if open_tag.name.startswith("@"):
- if open_tag.parameters:
- handler_name = ""
- parameters = open_tag.parameters.strip()
- handler_match = RE_HANDLER.match(parameters)
- if handler_match is not None:
- handler_name, match_parameters = handler_match.groups()
- parameters = (
- "()" if match_parameters is None else match_parameters
- )
-
- try:
- meta_params = literal_eval(parameters)
- except SyntaxError as error:
- raise MarkupError(
- f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
- )
- except Exception as error:
- raise MarkupError(
- f"error parsing {open_tag.parameters!r}; {error}"
- ) from None
-
- if handler_name:
- meta_params = (
- handler_name,
- meta_params
- if isinstance(meta_params, tuple)
- else (meta_params,),
- )
-
- else:
- meta_params = ()
-
- append_span(
- _Span(
- start, len(text), Style(meta={open_tag.name: meta_params})
- )
- )
- else:
- append_span(_Span(start, len(text), str(open_tag)))
-
- else: # Opening tag
- normalized_tag = _Tag(normalize(tag.name), tag.parameters)
- style_stack.append((len(text), normalized_tag))
-
- text_length = len(text)
- while style_stack:
- start, tag = style_stack.pop()
- style = str(tag)
- if style:
- append_span(_Span(start, text_length, style))
-
- text.spans = sorted(spans[::-1], key=attrgetter("start"))
- return text
-
-
-if __name__ == "__main__": # pragma: no cover
-
- MARKUP = [
- "[red]Hello World[/red]",
- "[magenta]Hello [b]World[/b]",
- "[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
- "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
- ":warning-emoji: [bold red blink] DANGER![/]",
- ]
-
- from pip._vendor.rich import print
- from pip._vendor.rich.table import Table
-
- grid = Table("Markup", "Result", padding=(0, 1))
-
- for markup in MARKUP:
- grid.add_row(Text(markup), markup)
-
- print(grid)
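
A short usage sketch for the two public helpers above; with a standalone `rich` install the imports would come from `rich.markup` instead of the vendored copy.

```python
from pip._vendor.rich.markup import escape, render

text = render("[bold red]error:[/bold red] disk full")
print(text.plain)   # plain string with the tags stripped
print(text.spans)   # style spans recovered from the markup

user_input = "[this is not a tag]"
print(escape(user_input))  # brackets escaped so they are not parsed as markup
```
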
diff --git a/spaces/Audio-AGI/WavJourney/code_generator.py b/spaces/Audio-AGI/WavJourney/code_generator.py
deleted file mode 100644
index 8a2192251d9df420a96430e602d5d115fac79047..0000000000000000000000000000000000000000
--- a/spaces/Audio-AGI/WavJourney/code_generator.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import os
-import json5
-import utils
-
-
-def check_json_script(data):
- foreground_mandatory_attrs_map = {
- 'music': ['vol', 'len', 'desc'],
- 'sound_effect': ['vol', 'len', 'desc'],
- 'speech': ['vol', 'text']
- }
- background_mandatory_attrs_map = {
- 'music': ['vol', 'desc'],
- 'sound_effect': ['vol', 'desc'],
- }
-
- def check_by_audio_type(audio, mandatory_attrs_map, audio_str):
- if audio['audio_type'] not in mandatory_attrs_map:
-            raise ValueError(f'audio_type is not allowed in this layout, audio={audio_str}')
- for attr_name in mandatory_attrs_map[audio['audio_type']]:
- if attr_name not in audio:
- raise ValueError(f'{attr_name} does not exist, audio={audio_str}')
-
- # Check json's format
- for audio in data:
- audio_str = json5.dumps(audio, indent=None)
- if 'layout' not in audio:
- raise ValueError(f'layout missing, audio={audio_str}')
- elif 'audio_type' not in audio:
- raise ValueError(f'audio_type missing, audio={audio_str}')
- elif audio['layout'] == 'foreground':
- check_by_audio_type(audio, foreground_mandatory_attrs_map, audio_str)
- elif audio['layout'] == 'background':
- if 'id' not in audio:
- raise ValueError(f'id not in background audio, audio={audio_str}')
- if 'action' not in audio:
- raise ValueError(f'action not in background audio, audio={audio_str}')
- if audio['action'] == 'begin':
- check_by_audio_type(audio, background_mandatory_attrs_map, audio_str)
- else:
- if audio['action'] != 'end':
- raise ValueError(f'Unknown action, audio={audio_str}')
- else:
- raise ValueError(f'Unknown layout, audio={audio_str}')
- #except Exception as err:
- # sys.stderr.write(f'PARSING ERROR: {err}, audio={json5.dumps(audio, indent=None)}\n')
- # all_clear = False
-
-
-def collect_and_check_audio_data(data):
- fg_audio_id = 0
- fg_audios = []
- bg_audios = []
- # Collect all the foreground and background audio ids used to calculate background audio length later
- for audio in data:
- if audio['layout'] == 'foreground':
- audio['id'] = fg_audio_id
- fg_audios.append(audio)
- fg_audio_id += 1
- else: # background
- if audio['action'] == 'begin':
- audio['begin_fg_audio_id'] = fg_audio_id
- bg_audios.append(audio)
- else: # ends
-                # find the background audio with the same id, and update its 'end_fg_audio_id'
- for bg_audio in bg_audios:
- if bg_audio['id'] == audio['id'] and bg_audio['audio_type'] == audio['audio_type']:
- bg_audio['end_fg_audio_id'] = fg_audio_id
- break
-
- # check if all background audios are valid
- for bg_audio in bg_audios:
- if 'begin_fg_audio_id' not in bg_audio:
- raise ValueError(f'begin of background missing, audio={bg_audio}')
- elif 'end_fg_audio_id' not in bg_audio:
- raise ValueError(f'end of background missing, audio={bg_audio}')
-
- if bg_audio['begin_fg_audio_id'] > bg_audio['end_fg_audio_id']:
- raise ValueError(f'background audio ends before start, audio={bg_audio}')
- elif bg_audio['begin_fg_audio_id'] == bg_audio['end_fg_audio_id']:
- raise ValueError(f'background audio contains no foreground audio, audio={bg_audio}')
- #except Exception as err:
- # sys.stderr.write(f'ALIGNMENT ERROR: {err}, audio={bg_audio}\n')
- # return None, None
-
- return fg_audios, bg_audios
-
-
-class AudioCodeGenerator:
- def __init__(self):
- self.wav_counters = {
- 'bg_sound_effect': 0,
- 'bg_music': 0,
- 'idle': 0,
- 'fg_sound_effect': 0,
- 'fg_music': 0,
- 'fg_speech': 0,
- }
- self.code = ''
-
- def append_code(self, content):
- self.code = f'{self.code}{content}\n'
-
- def generate_code(self, fg_audios, bg_audios, output_path, result_filename):
- def get_wav_name(audio):
- audio_type = audio['audio_type']
- layout = 'fg' if audio['layout'] == 'foreground' else 'bg'
- wav_type = f'{layout}_{audio_type}' if layout else audio_type
- desc = audio['text'] if 'text' in audio else audio['desc']
- desc = utils.text_to_abbrev_prompt(desc)
- wav_filename = f'{wav_type}_{self.wav_counters[wav_type]}_{desc}.wav'
- self.wav_counters[wav_type] += 1
- return wav_filename
-
- header = f'''
-import os
-import sys
-import datetime
-
-from APIs import TTM, TTS, TTA, MIX, CAT, COMPUTE_LEN
-
-
-fg_audio_lens = []
-wav_path = \"{output_path.absolute()}/audio\"
-os.makedirs(wav_path, exist_ok=True)
-
-'''
- self.append_code(header)
-
- fg_audio_wavs = []
- for fg_audio in fg_audios:
- wav_name = get_wav_name(fg_audio)
- if fg_audio['audio_type'] == 'sound_effect':
- self.append_code(f'TTA(text=\"{fg_audio["desc"]}\", length={fg_audio["len"]}, volume={fg_audio["vol"]}, out_wav=os.path.join(wav_path, \"{wav_name}\"))')
- elif fg_audio['audio_type'] == 'music':
- self.append_code(f'TTM(text=\"{fg_audio["desc"]}\", length={fg_audio["len"]}, volume={fg_audio["vol"]}, out_wav=os.path.join(wav_path, \"{wav_name}\"))')
- elif fg_audio['audio_type'] == 'speech':
- npz_path = self.char_to_voice_map[fg_audio["character"]]["npz_path"]
- npz_full_path = os.path.abspath(npz_path) if os.path.exists(npz_path) else npz_path
- self.append_code(f'TTS(text=\"{fg_audio["text"]}\", speaker_id=\"{self.char_to_voice_map[fg_audio["character"]]["id"]}\", volume={fg_audio["vol"]}, out_wav=os.path.join(wav_path, \"{wav_name}\"), speaker_npz=\"{npz_full_path}\")')
- fg_audio_wavs.append(wav_name)
- self.append_code(f'fg_audio_lens.append(COMPUTE_LEN(os.path.join(wav_path, \"{wav_name}\")))\n')
-
- # cat all foreground audio together
- self.append_code(f'fg_audio_wavs = []')
- for wav_filename in fg_audio_wavs:
- self.append_code(f'fg_audio_wavs.append(os.path.join(wav_path, \"{wav_filename}\"))')
- self.append_code(f'CAT(wavs=fg_audio_wavs, out_wav=os.path.join(wav_path, \"foreground.wav\"))')
-
- bg_audio_wavs = []
- self.append_code(f'\nbg_audio_offsets = []')
- for bg_audio in bg_audios:
- wav_name = get_wav_name(bg_audio)
- self.append_code(f'bg_audio_len = sum(fg_audio_lens[{bg_audio["begin_fg_audio_id"]}:{bg_audio["end_fg_audio_id"]}])')
- self.append_code(f'bg_audio_offset = sum(fg_audio_lens[:{bg_audio["begin_fg_audio_id"]}])')
- if bg_audio['audio_type'] == 'sound_effect':
- self.append_code(f'TTA(text=\"{bg_audio["desc"]}\", volume={bg_audio["vol"]}, length=bg_audio_len, out_wav=os.path.join(wav_path, \"{wav_name}\"))')
- elif bg_audio['audio_type'] == 'music':
- self.append_code(f'TTM(text=\"{bg_audio["desc"]}\", volume={bg_audio["vol"]}, length=bg_audio_len, out_wav=os.path.join(wav_path, \"{wav_name}\"))')
- else:
- raise ValueError()
- bg_audio_wavs.append(wav_name)
- self.append_code(f'bg_audio_offsets.append(bg_audio_offset)\n')
- self.append_code(f'bg_audio_wavs = []')
- for wav_filename in bg_audio_wavs:
- self.append_code(f'bg_audio_wavs.append(os.path.join(wav_path, \"{wav_filename}\"))')
-
- self.append_code(f'bg_audio_wav_offset_pairs = list(zip(bg_audio_wavs, bg_audio_offsets))')
- self.append_code(f'bg_audio_wav_offset_pairs.append((os.path.join(wav_path, \"foreground.wav\"), 0))')
- self.append_code(f'MIX(wavs=bg_audio_wav_offset_pairs, out_wav=os.path.join(wav_path, \"{result_filename}.wav\"))')
-
-
- def init_char_to_voice_map(self, filename):
- with open(filename, 'r') as file:
- self.char_to_voice_map = json5.load(file)
-
-
- def parse_and_generate(self, script_filename, char_to_voice_map_filename, output_path, result_filename='result'):
- self.code = ''
- self.init_char_to_voice_map(char_to_voice_map_filename)
-
- with open(script_filename, 'r') as file:
- data = json5.load(file)
-
- check_json_script(data)
- fg_audios, bg_audios = collect_and_check_audio_data(data)
- self.generate_code(fg_audios, bg_audios, output_path, result_filename)
- return self.code
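
To make the expected script layout concrete, here is a minimal JSON5 script that passes `check_json_script` and `collect_and_check_audio_data` above; the field values are illustrative and not taken from the original project.

```python
import json5

script = json5.loads('''
[
    {"layout": "background", "audio_type": "music", "action": "begin",
     "id": 0, "vol": -35, "desc": "gentle rain ambience"},
    {"layout": "foreground", "audio_type": "speech",
     "vol": -15, "text": "Welcome to the show.", "character": "Narrator"},
    {"layout": "background", "audio_type": "music", "action": "end", "id": 0}
]
''')

check_json_script(script)                      # raises ValueError on malformed entries
fg, bg = collect_and_check_audio_data(script)  # aligns background spans to foreground ids
print(len(fg), len(bg))                        # -> 1 1
```
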
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/catalog.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/catalog.py
deleted file mode 100644
index 45c110c19508f23921b9033cdaf0aa8056f0c125..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/catalog.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import copy
-import logging
-import types
-from collections import UserDict
-from typing import List
-
-from detectron2.utils.logger import log_first_n
-
-__all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"]
-
-
-class _DatasetCatalog(UserDict):
- """
- A global dictionary that stores information about the datasets and how to obtain them.
-
- It contains a mapping from strings
- (which are names that identify a dataset, e.g. "coco_2014_train")
- to a function which parses the dataset and returns the samples in the
- format of `list[dict]`.
-
- The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details)
- if used with the data loader functionalities in `data/build.py,data/detection_transform.py`.
-
- The purpose of having this catalog is to make it easy to choose
- different datasets, by just using the strings in the config.
- """
-
- def register(self, name, func):
- """
- Args:
- name (str): the name that identifies a dataset, e.g. "coco_2014_train".
- func (callable): a callable which takes no arguments and returns a list of dicts.
- It must return the same results if called multiple times.
- """
- assert callable(func), "You must register a function with `DatasetCatalog.register`!"
- assert name not in self, "Dataset '{}' is already registered!".format(name)
- self[name] = func
-
- def get(self, name):
- """
- Call the registered function and return its results.
-
- Args:
- name (str): the name that identifies a dataset, e.g. "coco_2014_train".
-
- Returns:
- list[dict]: dataset annotations.
- """
- try:
- f = self[name]
- except KeyError as e:
- raise KeyError(
- "Dataset '{}' is not registered! Available datasets are: {}".format(
- name, ", ".join(list(self.keys()))
- )
- ) from e
- return f()
-
- def list(self) -> List[str]:
- """
- List all registered datasets.
-
- Returns:
- list[str]
- """
- return list(self.keys())
-
- def remove(self, name):
- """
- Alias of ``pop``.
- """
- self.pop(name)
-
- def __str__(self):
- return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys()))
-
- __repr__ = __str__
-
-
-DatasetCatalog = _DatasetCatalog()
-DatasetCatalog.__doc__ = (
- _DatasetCatalog.__doc__
- + """
- .. automethod:: detectron2.data.catalog.DatasetCatalog.register
- .. automethod:: detectron2.data.catalog.DatasetCatalog.get
-"""
-)
-
-
-class Metadata(types.SimpleNamespace):
- """
- A class that supports simple attribute setter/getter.
-    It is intended for storing metadata of a dataset and making it accessible globally.
-
- Examples:
- ::
- # somewhere when you load the data:
- MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"]
-
- # somewhere when you print statistics or visualize:
- classes = MetadataCatalog.get("mydataset").thing_classes
- """
-
- # the name of the dataset
- # set default to N/A so that `self.name` in the errors will not trigger getattr again
- name: str = "N/A"
-
- _RENAMED = {
- "class_names": "thing_classes",
- "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
- "stuff_class_names": "stuff_classes",
- }
-
- def __getattr__(self, key):
- if key in self._RENAMED:
- log_first_n(
- logging.WARNING,
- "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
- n=10,
- )
- return getattr(self, self._RENAMED[key])
-
- # "name" exists in every metadata
- if len(self.__dict__) > 1:
- raise AttributeError(
- "Attribute '{}' does not exist in the metadata of dataset '{}'. Available "
- "keys are {}.".format(key, self.name, str(self.__dict__.keys()))
- )
- else:
- raise AttributeError(
- f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': "
- "metadata is empty."
- )
-
- def __setattr__(self, key, val):
- if key in self._RENAMED:
- log_first_n(
- logging.WARNING,
- "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
- n=10,
- )
- setattr(self, self._RENAMED[key], val)
-
- # Ensure that metadata of the same name stays consistent
- try:
- oldval = getattr(self, key)
- assert oldval == val, (
- "Attribute '{}' in the metadata of '{}' cannot be set "
- "to a different value!\n{} != {}".format(key, self.name, oldval, val)
- )
- except AttributeError:
- super().__setattr__(key, val)
-
- def as_dict(self):
- """
- Returns all the metadata as a dict.
-        Note that modifications to the returned dict will not be reflected in the Metadata object.
- """
- return copy.copy(self.__dict__)
-
- def set(self, **kwargs):
- """
- Set multiple metadata with kwargs.
- """
- for k, v in kwargs.items():
- setattr(self, k, v)
- return self
-
- def get(self, key, default=None):
- """
- Access an attribute and return its value if exists.
- Otherwise return default.
- """
- try:
- return getattr(self, key)
- except AttributeError:
- return default
-
-
-class _MetadataCatalog(UserDict):
- """
- MetadataCatalog is a global dictionary that provides access to
- :class:`Metadata` of a given dataset.
-
- The metadata associated with a certain name is a singleton: once created, the
- metadata will stay alive and will be returned by future calls to ``get(name)``.
-
- It's like global variables, so don't abuse it.
- It's meant for storing knowledge that's constant and shared across the execution
- of the program, e.g.: the class names in COCO.
- """
-
- def get(self, name):
- """
- Args:
- name (str): name of a dataset (e.g. coco_2014_train).
-
- Returns:
- Metadata: The :class:`Metadata` instance associated with this name,
- or create an empty one if none is available.
- """
- assert len(name)
- r = super().get(name, None)
- if r is None:
- r = self[name] = Metadata(name=name)
- return r
-
- def list(self):
- """
- List all registered metadata.
-
- Returns:
- list[str]: keys (names of datasets) of all registered metadata
- """
- return list(self.keys())
-
- def remove(self, name):
- """
- Alias of ``pop``.
- """
- self.pop(name)
-
- def __str__(self):
- return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys()))
-
- __repr__ = __str__
-
-
-MetadataCatalog = _MetadataCatalog()
-MetadataCatalog.__doc__ = (
- _MetadataCatalog.__doc__
- + """
- .. automethod:: detectron2.data.catalog.MetadataCatalog.get
-"""
-)
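
A hedged sketch of the registration workflow the catalogs above describe; the dataset name and stub records are made up for illustration.

```python
from detectron2.data import DatasetCatalog, MetadataCatalog

def load_my_dataset():
    # A real loader would parse annotation files; a tiny stub is enough here.
    return [{"file_name": "img_0.jpg", "image_id": 0, "annotations": []}]

DatasetCatalog.register("my_dataset_train", load_my_dataset)
MetadataCatalog.get("my_dataset_train").thing_classes = ["person", "dog"]

dicts = DatasetCatalog.get("my_dataset_train")   # calls load_my_dataset()
meta = MetadataCatalog.get("my_dataset_train")   # singleton Metadata for this name
print(len(dicts), meta.thing_classes)
```
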
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/README.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/README.md
deleted file mode 100644
index 9fb3e4f7afec17137c95c78be6ef06d520ec8032..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-### Common Datasets
-
-The datasets implemented here do not need to load the data into the final format.
-They should provide the minimal data structure needed to use the dataset, so it can be very efficient.
-
-For example, for an image dataset, just provide the file names and labels, but don't read the images.
-Let the downstream decide how to read.
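
As an illustration of that guideline, a loader might return only file paths and labels and leave image decoding to the downstream pipeline; the paths and class names below are hypothetical.

```python
import os

def load_tiny_classification_set(root="datasets/tiny", classes=("cat", "dog")):
    """Return lightweight records only -- no image bytes are read here."""
    records = []
    for label, cls in enumerate(classes):
        cls_dir = os.path.join(root, cls)
        for fname in sorted(os.listdir(cls_dir)):
            records.append({"file_name": os.path.join(cls_dir, fname), "label": label})
    return records
```
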
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/__init__.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/__init__.py
deleted file mode 100644
index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
diff --git a/spaces/AzumaSeren100/XuanShen-Bert-VITS2/server.py b/spaces/AzumaSeren100/XuanShen-Bert-VITS2/server.py
deleted file mode 100644
index 00581d353712a0612ede918acdbf5041e42bf7d2..0000000000000000000000000000000000000000
--- a/spaces/AzumaSeren100/XuanShen-Bert-VITS2/server.py
+++ /dev/null
@@ -1,124 +0,0 @@
-from flask import Flask, request, Response
-from io import BytesIO
-import torch
-from av import open as avopen
-
-import commons
-import utils
-from models import SynthesizerTrn
-from text.symbols import symbols
-from text import cleaned_text_to_sequence, get_bert
-from text.cleaner import clean_text
-from scipy.io import wavfile
-
-# Flask Init
-app = Flask(__name__)
-app.config['JSON_AS_ASCII'] = False
-def get_text(text, language_str, hps):
- norm_text, phone, tone, word2ph = clean_text(text, language_str)
- print([f"{p}{t}" for p, t in zip(phone, tone)])
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
- if hps.data.add_blank:
- phone = commons.intersperse(phone, 0)
- tone = commons.intersperse(tone, 0)
- language = commons.intersperse(language, 0)
- for i in range(len(word2ph)):
- word2ph[i] = word2ph[i] * 2
- word2ph[0] += 1
- bert = get_bert(norm_text, word2ph, language_str)
-
- assert bert.shape[-1] == len(phone)
-
- phone = torch.LongTensor(phone)
- tone = torch.LongTensor(tone)
- language = torch.LongTensor(language)
-
- return bert, phone, tone, language
-
-def infer(text, sdp_ratio, noise_scale, noise_scale_w,length_scale,sid):
- bert, phones, tones, lang_ids = get_text(text,"ZH", hps,)
- with torch.no_grad():
- x_tst=phones.to(dev).unsqueeze(0)
- tones=tones.to(dev).unsqueeze(0)
- lang_ids=lang_ids.to(dev).unsqueeze(0)
- bert = bert.to(dev).unsqueeze(0)
- x_tst_lengths = torch.LongTensor([phones.size(0)]).to(dev)
- speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(dev)
- audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids,bert, sdp_ratio=sdp_ratio
- , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy()
- return audio
-
-def replace_punctuation(text, i=2):
- punctuation = ",。?!"
- for char in punctuation:
- text = text.replace(char, char * i)
- return text
-
-def wav2(i, o, format):
- inp = avopen(i, 'rb')
- out = avopen(o, 'wb', format=format)
- if format == "ogg": format = "libvorbis"
-
- ostream = out.add_stream(format)
-
- for frame in inp.decode(audio=0):
- for p in ostream.encode(frame): out.mux(p)
-
- for p in ostream.encode(None): out.mux(p)
-
- out.close()
- inp.close()
-
-# Load Generator
-hps = utils.get_hparams_from_file("./configs/config.json")
-
-dev='cuda'
-net_g = SynthesizerTrn(
- len(symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- n_speakers=hps.data.n_speakers,
- **hps.model).to(dev)
-_ = net_g.eval()
-
-# _ = utils.load_checkpoint("logs/G_649000.pth", net_g, None,skip_optimizer=True)
-_ = utils.load_checkpoint("logs/dxl/G_21000.pth", net_g, None,skip_optimizer=True)
-
-@app.route("/",methods=['GET','POST'])
-def main():
- if request.method == 'GET':
- try:
- speaker = request.args.get('speaker')
- text = request.args.get('text').replace("/n","")
- sdp_ratio = float(request.args.get("sdp_ratio", 0.2))
- noise = float(request.args.get("noise", 0.5))
- noisew = float(request.args.get("noisew", 0.6))
- length = float(request.args.get("length", 1.2))
- if length >= 2:
- return "Too big length"
- if len(text) >=200:
- return "Too long text"
- fmt = request.args.get("format", "wav")
- if None in (speaker, text):
- return "Missing Parameter"
- if fmt not in ("mp3", "wav", "ogg"):
- return "Invalid Format"
- except:
- return "Invalid Parameter"
-
- with torch.no_grad():
- audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise, noise_scale_w=noisew, length_scale=length, sid=speaker)
-
- with BytesIO() as wav:
- wavfile.write(wav, hps.data.sampling_rate, audio)
- torch.cuda.empty_cache()
- if fmt == "wav":
- return Response(wav.getvalue(), mimetype="audio/wav")
- wav.seek(0, 0)
- with BytesIO() as ofp:
- wav2(wav, ofp, fmt)
- return Response(
- ofp.getvalue(),
- mimetype="audio/mpeg" if fmt == "mp3" else "audio/ogg"
- )
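
Once the Flask app above is being served (e.g. with `flask run`), the endpoint can be exercised with a plain GET request. The host, port, and speaker name below are assumptions; the parameter names match those read in `main()`.

```python
import requests

resp = requests.get(
    "http://127.0.0.1:5000/",
    params={
        "speaker": "dxl",      # must match a key in hps.data.spk2id (assumed name)
        "text": "你好,世界",
        "sdp_ratio": 0.2,
        "noise": 0.5,
        "noisew": 0.6,
        "length": 1.2,
        "format": "wav",
    },
)
with open("out.wav", "wb") as f:
    f.write(resp.content)
```
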
diff --git a/spaces/Benson/text-generation/Examples/Descargar Gratis Gta 5 Mvil Apk Para Android.md b/spaces/Benson/text-generation/Examples/Descargar Gratis Gta 5 Mvil Apk Para Android.md
deleted file mode 100644
index 124cdf8580326c37242ee9125e52652aa358f93a..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Gratis Gta 5 Mvil Apk Para Android.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-Metro surfistas ilimitado Hack 2023 APK Descargar: Todo lo que necesita saber
-Si eres un fan de los juegos de corredor sin fin, probablemente hayas oído hablar de Subway Surfers. Este popular juego le permite navegar por el metro con su equipo fresco mientras esquiva los trenes, obstáculos, y el inspector gruñón. ¿Pero qué pasa si quieres disfrutar del juego sin preocuparte por monedas, llaves, potenciadores, personajes y tableros? Ahí es donde Subway Surfers ilimitada hack 2023 apk descarga es muy útil. En este artículo, le diremos todo lo que necesita saber acerca de este hack apk, incluyendo sus características, beneficios, riesgos, y cómo usarlo. También te daremos algunos consejos y trucos para jugar a Subway Surfers y echar un vistazo a la última actualización del juego en 2023. ¡Así que comencemos!
- ¿Qué es Subway Surfers?
-Subway Surfers es un clásico juego de corredor sin fin que fue creado por SYBO Games en 2012. El juego ha sido descargado más de 1 mil millones de veces en Google Play Store solo y ha ganado muchos premios y reconocimientos. El juego cuenta con Jake, Tricky, Fresh y otros personajes que navegan por el metro alrededor del mundo mientras escapan del inspector y su perro. El juego tiene gráficos HD coloridos y vívidos, hoverboard surf, jetpacks de pintura, acrobacias de golpe rápido relámpago, y más. El juego también tiene un modo World Tour donde puedes explorar diferentes ciudades cada mes y recoger recompensas especiales.
-descargar gratis gta 5 móvil apk para android
DOWNLOAD • https://bltlly.com/2v6JEd
- ¿Por qué Hack Subway Surfers?
-
-Hackear Subway Surfers no es tan fácil como parece. Hay muchos riesgos y desafíos involucrados en hacerlo. Por un lado, hackear Subway Surfers puede exponer tu dispositivo a virus, malware u otro software dañino que puede dañar tus datos o comprometer tu seguridad. Por otra parte, hackear Subway Surfers puede hacer que te prohíban participar en el juego o incluso enfrentar consecuencias legales si violas los términos y condiciones del juego. Por lo tanto, debe ser cuidadoso y cauteloso al hackear Subway Surfers y solo utilizar fuentes confiables y confiables para descargar archivos apk hack.
- Los mejores surfistas de metro Hack APK para 2023
-Si usted está buscando el mejor metro surfistas hack apk para 2023, usted debe comprobar el metro surfistas ilimitada Hack 2023 APK Descargar. Esta es una versión modificada del juego original que ofrece monedas ilimitadas, llaves, potenciadores, personajes y tableros. Puede utilizar este hack apk para disfrutar del juego sin limitaciones o restricciones. Puedes comprar lo que quieras, mejorar tus habilidades, desbloquear nuevo contenido, completar misiones y aumentar tu puntuación. También puedes navegar por el metro con cualquier personaje o tabla que te guste y divertirte con el juego.
- Características de los surfistas de metro Hack APK
-
- Cómo descargar e instalar el metro surfistas Hack APK
-Si desea descargar e instalar el Subway Surfers Hack APK en su dispositivo, es necesario seguir estos sencillos pasos: - Paso 1: Ir a [este enlace] y descargar el archivo Subway Surfers Hack APK en su dispositivo. - Paso 2: Ir a la configuración del dispositivo y habilitar fuentes desconocidas. Esto le permitirá instalar aplicaciones de fuentes distintas de Google Play Store. - Paso 3: Ir a su administrador de archivos y localizar el archivo descargado Subway Surfers Hack APK. Toque en él e instalarlo en su dispositivo. - Paso 4: Espere a que el proceso de instalación termine y luego inicie el juego desde el cajón de la aplicación. - Paso 5: ¡Disfruta jugando Subway Surfers con recursos ilimitados!
- Cómo utilizar el metro surfistas Hack APK
-Usando el metro Surfers Hack APK es muy fácil y sencillo. Solo tienes que seguir estos sencillos pasos: - Paso 1: Iniciar el juego desde el cajón de la aplicación y esperar a que se cargue. - Paso 2: Toque en el icono de la tienda en la esquina superior derecha de la pantalla y comprar cualquier cosa que desee con monedas y llaves ilimitadas. - Paso 3: Toque en el icono del carácter en la esquina inferior izquierda de la pantalla y seleccione cualquier personaje que desee con monedas y teclas ilimitadas. - Paso 4: Toque en el icono del tablero en la esquina inferior derecha de la pantalla y seleccione cualquier tablero que desee con monedas y teclas ilimitadas. - Paso 5: Toca el botón de reproducción en el centro inferior de la pantalla y comienza a navegar por el metro con potenciadores ilimitados.
- Consejos y trucos para surfistas de metro
-
-utilizar diferentes potenciadores para maximizar sus efectos y beneficios. Por ejemplo, debe usar jetpacks cuando no hay obstáculos por encima de la cabeza, imanes cuando hay muchas monedas, multiplicadores de puntuación cuando tiene una larga carrera, hoverboards cuando está en peligro de estrellarse, y así sucesivamente. También debe combinar diferentes potenciadores para crear combos potentes que pueden aumentar su puntuación y rendimiento. - Mejora tus habilidades: Una de las cosas que puedes hacer con tus monedas es mejorar tus habilidades. Estas son habilidades que pueden mejorar tu juego y hacerte un mejor surfista. Hay cuatro habilidades que puedes mejorar: jetpack, imán, multiplicador 2x y súper zapatillas. Cada habilidad tiene cinco niveles que aumentan su duración y efectividad. Deberías mejorar tus habilidades regularmente para disfrutar de sus beneficios y ventajas.
- Actualización de Subway Surfers 2023
-Subway Surfers es un juego que se actualiza constantemente con nuevas características y contenido. La última actualización de Subway Surfers es la actualización de 2023 que presenta una nueva ciudad, personaje, atuendo y tabla. La actualización de 2023 te lleva a Tokio, Japón, donde puedes navegar por el metro con Harumi, una linda chica japonesa que ama el anime y el cosplay. También puedes desbloquear su traje Meow y su Kitty Board que tiene un efecto de rastro especial.
-
- ¿Qué hay de nuevo en la actualización de Subway Surfers 2023?
-
- ¿Cómo descargar e instalar la actualización de Subway Surfers 2023?
-Si desea descargar e instalar la actualización de Subway Surfers 2023 en su dispositivo, debe seguir estos sencillos pasos: - Paso 1: Vaya a Google Play Store y busque Subway Surfers. Si ya tiene el juego instalado, verá un botón de actualización. Toque en él y espere a que la actualización se descargue e instale. Si no tiene el juego instalado, verá un botón de instalación. Toque en él y espere a que el juego se descargue e instale. - Paso 2: Una vez que el juego se ha actualizado o instalado, lanzarlo desde el cajón de la aplicación y esperar a que se cargue. - Paso 3: ¡Disfruta jugando a Subway Surfers con la nueva actualización de 2023!
- Conclusión
-Subway Surfers es un divertido y adictivo juego de corredor sin fin que te permite navegar por el metro con tu equipo mientras esquivas trenes, obstáculos y el inspector gruñón. Pero si desea disfrutar del juego sin limitaciones o restricciones, puede utilizar Subway Surfers ilimitada hack 2023 apk descargar. Este hack apk le da monedas ilimitadas, llaves, potenciadores, personajes y tableros que puede utilizar para comprar lo que quieras, mejorar sus habilidades, desbloquear nuevo contenido, misiones completas, y aumentar su puntuación. También se puede utilizar este hack apk para jugar la última 2023 actualización de Subway Surfers que cuenta con una nueva ciudad, personaje, equipo y tablero. Sin embargo, también debes ser consciente de los riesgos y desafíos de hackear Subway Surfers, como virus, malware, prohibiciones y problemas legales. Por lo tanto, debe ser cuidadoso y cauteloso al hackear Subway Surfers y solo utilizar fuentes confiables y confiables para descargar archivos apk hack. Esperamos que este artículo le ha ayudado a aprender todo lo que necesita saber sobre Subway Surfers ilimitada hack 2023 apk descargar. Ahora seguir adelante y navegar por el metro con recursos ilimitados!
- Preguntas frecuentes
-Aquí hay algunas preguntas frecuentes y respuestas sobre Subway Surfers ilimitada hack 2023 apk download:
-
-A: Subway Surfers ilimitada hack 2023 apk descarga es seguro de usar, siempre y cuando se descarga de una fuente confiable y confiable. Sin embargo, también debe tener cuidado con los virus, malware u otro software dañino que puede dañar su dispositivo o comprometer su seguridad. También debe escanear el archivo apk hack con un programa antivirus o anti-malware antes de instalarlo en su dispositivo.
- Q: Es Subway Surfers ilimitada hack 2023 apk descarga legal de usar?
-A: Subway Surfers ilimitada hack 2023 apk descarga no es legal de usar, ya que viola los términos y condiciones del juego. Mediante el uso de este hack apk, que está modificando el juego original y acceder a sus recursos sin permiso o autorización. Esto puede hacer que te prohíban participar en el juego o incluso enfrentar consecuencias legales si te atrapan o te denuncian. Por lo tanto, usted debe utilizar este hack apk a su propio riesgo y discreción.
- P: ¿Cómo puedo obtener más monedas y llaves en Subway Surfers?
-A: Hay varias maneras de conseguir más monedas y llaves en Subway Surfers. Puedes recogerlas en las pistas o comprarlas con dinero real. También puedes completar misiones, logros, desafíos diarios, eventos o ver anuncios para ganar más monedas y llaves. Sin embargo, si desea obtener monedas y llaves ilimitadas en Subway Surfers, puede utilizar Subway Surfers ilimitada hack 2023 apk download.
- P: ¿Cómo puedo desbloquear todos los personajes y tablas en Subway Surfers?
-A: Hay varias maneras de desbloquear todos los personajes y tablas en Subway Surfers. Puedes comprarlos con monedas o llaves o encontrarlos en las pistas. También puedes desbloquearlos completando ciertas misiones, logros, eventos o colecciones. Sin embargo, si desea desbloquear todos los personajes y tablas en Subway Surfers, puede utilizar Subway Surfers ilimitada hack 2023 apk download.
- Q: ¿Cómo puedo actualizar Subway Surfers a la última versión?
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/BetterAPI/BetterChat_new/src/routes/conversation/[id]/+page.server.ts b/spaces/BetterAPI/BetterChat_new/src/routes/conversation/[id]/+page.server.ts
deleted file mode 100644
index 3aced2cc895525e5bad8acd59546897df945f137..0000000000000000000000000000000000000000
--- a/spaces/BetterAPI/BetterChat_new/src/routes/conversation/[id]/+page.server.ts
+++ /dev/null
@@ -1,33 +0,0 @@
-import type { PageServerLoad } from "./$types";
-import { collections } from "$lib/server/database";
-import { ObjectId } from "mongodb";
-import { error } from "@sveltejs/kit";
-
-export const load: PageServerLoad = async (event) => {
- // todo: add validation on params.id
- const conversation = await collections.conversations.findOne({
- _id: new ObjectId(event.params.id),
- sessionId: event.locals.sessionId,
- });
-
- if (!conversation) {
- const conversationExists =
- (await collections.conversations.countDocuments({
- _id: new ObjectId(event.params.id),
- })) !== 0;
-
- if (conversationExists) {
- throw error(
- 403,
- "You don't have access to this conversation. If someone gave you this link, ask them to use the 'share' feature instead."
- );
- }
-
- throw error(404, "Conversation not found.");
- }
-
- return {
- messages: conversation.messages,
- title: conversation.title,
- };
-};
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/standard.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/standard.py
deleted file mode 100644
index 1f73db0cf8b044b8b542610de4169aba71c462fa..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/standard.py
+++ /dev/null
@@ -1,532 +0,0 @@
-"""Standard retry behavior.
-
-This contains the default standard retry behavior.
-It provides consistent behavior with other AWS SDKs.
-
-The key base classes used for retries:
-
- * ``BaseRetryableChecker`` - Used to check a specific condition that
- indicates a retry should happen. This can include things like
- max attempts, HTTP status code checks, error code checks etc.
- * ``RetryBackoff`` - Used to determine how long we should back off before
- we retry a request. This is the class that will implement delay such
- as exponential backoff.
- * ``RetryPolicy`` - Main class that determines if a retry should
- happen. It can combine data from a various BaseRetryableCheckers
- to make a final call as to whether or not a retry should happen.
- It then uses a ``BaseRetryBackoff`` to determine how long to delay.
- * ``RetryHandler`` - The bridge between botocore's event system
- used by endpoint.py to manage retries and the interfaces defined
- in this module.
-
-This allows us to define an API that has minimal coupling to the event
-based API used by botocore.
-
-"""
-import logging
-import random
-
-from botocore.exceptions import (
- ConnectionError,
- ConnectTimeoutError,
- HTTPClientError,
- ReadTimeoutError,
-)
-from botocore.retries import quota, special
-from botocore.retries.base import BaseRetryableChecker, BaseRetryBackoff
-
-DEFAULT_MAX_ATTEMPTS = 3
-logger = logging.getLogger(__name__)
-
-
-def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS):
- retry_quota = RetryQuotaChecker(quota.RetryQuota())
-
- service_id = client.meta.service_model.service_id
- service_event_name = service_id.hyphenize()
- client.meta.events.register(
- f'after-call.{service_event_name}', retry_quota.release_retry_quota
- )
-
- handler = RetryHandler(
- retry_policy=RetryPolicy(
- retry_checker=StandardRetryConditions(max_attempts=max_attempts),
- retry_backoff=ExponentialBackoff(),
- ),
- retry_event_adapter=RetryEventAdapter(),
- retry_quota=retry_quota,
- )
-
- unique_id = 'retry-config-%s' % service_event_name
- client.meta.events.register(
- 'needs-retry.%s' % service_event_name,
- handler.needs_retry,
- unique_id=unique_id,
- )
- return handler
-
-
-class RetryHandler:
- """Bridge between botocore's event system and this module.
-
- This class is intended to be hooked to botocore's event system
- as an event handler.
- """
-
- def __init__(self, retry_policy, retry_event_adapter, retry_quota):
- self._retry_policy = retry_policy
- self._retry_event_adapter = retry_event_adapter
- self._retry_quota = retry_quota
-
- def needs_retry(self, **kwargs):
- """Connect as a handler to the needs-retry event."""
- retry_delay = None
- context = self._retry_event_adapter.create_retry_context(**kwargs)
- if self._retry_policy.should_retry(context):
- # Before we can retry we need to ensure we have sufficient
- # capacity in our retry quota.
- if self._retry_quota.acquire_retry_quota(context):
- retry_delay = self._retry_policy.compute_retry_delay(context)
- logger.debug(
- "Retry needed, retrying request after delay of: %s",
- retry_delay,
- )
- else:
- logger.debug(
- "Retry needed but retry quota reached, "
- "not retrying request."
- )
- else:
- logger.debug("Not retrying request.")
- self._retry_event_adapter.adapt_retry_response_from_context(context)
- return retry_delay
-
-
-class RetryEventAdapter:
- """Adapter to existing retry interface used in the endpoints layer.
-
- This existing interface for determining if a retry needs to happen
- is event based and used in ``botocore.endpoint``. The interface has
- grown organically over the years and could use some cleanup. This
- adapter converts that interface into the interface used by the
- new retry strategies.
-
- """
-
- def create_retry_context(self, **kwargs):
- """Create context based on needs-retry kwargs."""
- response = kwargs['response']
- if response is None:
- # If response is None it means that an exception was raised
- # because we never received a response from the service. This
- # could be something like a ConnectionError we get from our
- # http layer.
- http_response = None
- parsed_response = None
- else:
- http_response, parsed_response = response
- # This provides isolation between the kwargs emitted in the
- # needs-retry event, and what this module uses to check for
- # retries.
- context = RetryContext(
- attempt_number=kwargs['attempts'],
- operation_model=kwargs['operation'],
- http_response=http_response,
- parsed_response=parsed_response,
- caught_exception=kwargs['caught_exception'],
- request_context=kwargs['request_dict']['context'],
- )
- return context
-
- def adapt_retry_response_from_context(self, context):
- """Modify response back to user back from context."""
- # This will mutate attributes that are returned back to the end
- # user. We do it this way so that all the various retry classes
- # don't mutate any input parameters from the needs-retry event.
- metadata = context.get_retry_metadata()
- if context.parsed_response is not None:
- context.parsed_response.setdefault('ResponseMetadata', {}).update(
- metadata
- )
-
-
-# Implementation note: this is meant to encapsulate all the misc stuff
-# that gets sent in the needs-retry event. This is mapped so that params
-# are more clear and explicit.
-class RetryContext:
- """Normalize a response that we use to check if a retry should occur.
-
- This class smoothes over the different types of responses we may get
- from a service including:
-
- * A modeled error response from the service that contains a service
- code and error message.
- * A raw HTTP response that doesn't contain service protocol specific
- error keys.
- * An exception received while attempting to retrieve a response.
- This could be a ConnectionError we receive from our HTTP layer which
- could represent that we weren't able to receive a response from
- the service.
-
- This class guarantees that at least one of the above attributes will be
- non None.
-
- This class is meant to provide a read-only view into the properties
- associated with a possible retryable response. None of the properties
- are meant to be modified directly.
-
- """
-
- def __init__(
- self,
- attempt_number,
- operation_model=None,
- parsed_response=None,
- http_response=None,
- caught_exception=None,
- request_context=None,
- ):
- # 1-based attempt number.
- self.attempt_number = attempt_number
- self.operation_model = operation_model
- # This is the parsed response dictionary we get from parsing
- # the HTTP response from the service.
- self.parsed_response = parsed_response
- # This is an instance of botocore.awsrequest.AWSResponse.
- self.http_response = http_response
- # This is a subclass of Exception that will be non None if
- # an exception was raised while trying to retrieve a response.
- self.caught_exception = caught_exception
- # This is the request context dictionary that's added to the
- # request dict. This is used to store any additional state
- # about the request. We use this for storing retry quota
- # capacity.
- if request_context is None:
- request_context = {}
- self.request_context = request_context
- self._retry_metadata = {}
-
- # These are misc helper methods to avoid duplication in the various
- # checkers.
- def get_error_code(self):
- """Check if there was a parsed response with an error code.
-
- If we could not find any error codes, ``None`` is returned.
-
- """
- if self.parsed_response is None:
- return
- error = self.parsed_response.get('Error', {})
- if not isinstance(error, dict):
- return
- return error.get('Code')
-
- def add_retry_metadata(self, **kwargs):
- """Add key/value pairs to the retry metadata.
-
- This allows any objects during the retry process to add
- metadata about any checks/validations that happened.
-
- This gets added to the response metadata in the retry handler.
-
- """
- self._retry_metadata.update(**kwargs)
-
- def get_retry_metadata(self):
- return self._retry_metadata.copy()
-
-
-class RetryPolicy:
- def __init__(self, retry_checker, retry_backoff):
- self._retry_checker = retry_checker
- self._retry_backoff = retry_backoff
-
- def should_retry(self, context):
- return self._retry_checker.is_retryable(context)
-
- def compute_retry_delay(self, context):
- return self._retry_backoff.delay_amount(context)
-
-
-class ExponentialBackoff(BaseRetryBackoff):
-
- _BASE = 2
- _MAX_BACKOFF = 20
-
- def __init__(self, max_backoff=20, random=random.random):
- self._base = self._BASE
- self._max_backoff = max_backoff
- self._random = random
-
- def delay_amount(self, context):
- """Calculates delay based on exponential backoff.
-
- This class implements truncated binary exponential backoff
- with jitter::
-
- t_i = min(rand(0, 1) * 2 ** i, MAX_BACKOFF)
-
- where ``i`` is the request attempt (0 based).
-
- """
- # The context.attempt_number is a 1-based value, but we have
- # to calculate the delay based on ``i``, a 0-based value. We
- # want the first delay to just be ``rand(0, 1)``.
- return min(
- self._random() * (self._base ** (context.attempt_number - 1)),
- self._max_backoff,
- )
-
-
-class MaxAttemptsChecker(BaseRetryableChecker):
- def __init__(self, max_attempts):
- self._max_attempts = max_attempts
-
- def is_retryable(self, context):
- under_max_attempts = context.attempt_number < self._max_attempts
- retries_context = context.request_context.get('retries')
- if retries_context:
- retries_context['max'] = max(
- retries_context.get('max', 0), self._max_attempts
- )
- if not under_max_attempts:
- logger.debug("Max attempts of %s reached.", self._max_attempts)
- context.add_retry_metadata(MaxAttemptsReached=True)
- return under_max_attempts
-
-
-class TransientRetryableChecker(BaseRetryableChecker):
- _TRANSIENT_ERROR_CODES = [
- 'RequestTimeout',
- 'RequestTimeoutException',
- 'PriorRequestNotComplete',
- ]
- _TRANSIENT_STATUS_CODES = [500, 502, 503, 504]
- _TRANSIENT_EXCEPTION_CLS = (
- ConnectionError,
- HTTPClientError,
- )
-
- def __init__(
- self,
- transient_error_codes=None,
- transient_status_codes=None,
- transient_exception_cls=None,
- ):
- if transient_error_codes is None:
- transient_error_codes = self._TRANSIENT_ERROR_CODES[:]
- if transient_status_codes is None:
- transient_status_codes = self._TRANSIENT_STATUS_CODES[:]
- if transient_exception_cls is None:
- transient_exception_cls = self._TRANSIENT_EXCEPTION_CLS
- self._transient_error_codes = transient_error_codes
- self._transient_status_codes = transient_status_codes
- self._transient_exception_cls = transient_exception_cls
-
- def is_retryable(self, context):
- if context.get_error_code() in self._transient_error_codes:
- return True
- if context.http_response is not None:
- if (
- context.http_response.status_code
- in self._transient_status_codes
- ):
- return True
- if context.caught_exception is not None:
- return isinstance(
- context.caught_exception, self._transient_exception_cls
- )
- return False
-
-
-class ThrottledRetryableChecker(BaseRetryableChecker):
- # This is the union of all error codes we've seen that represent
- # a throttled error.
- _THROTTLED_ERROR_CODES = [
- 'Throttling',
- 'ThrottlingException',
- 'ThrottledException',
- 'RequestThrottledException',
- 'TooManyRequestsException',
- 'ProvisionedThroughputExceededException',
- 'TransactionInProgressException',
- 'RequestLimitExceeded',
- 'BandwidthLimitExceeded',
- 'LimitExceededException',
- 'RequestThrottled',
- 'SlowDown',
- 'PriorRequestNotComplete',
- 'EC2ThrottledException',
- ]
-
- def __init__(self, throttled_error_codes=None):
- if throttled_error_codes is None:
- throttled_error_codes = self._THROTTLED_ERROR_CODES[:]
- self._throttled_error_codes = throttled_error_codes
-
- def is_retryable(self, context):
- # Only the error code from a parsed service response is used
- # to determine if the response is a throttled response.
- return context.get_error_code() in self._throttled_error_codes
-
-
-class ModeledRetryableChecker(BaseRetryableChecker):
- """Check if an error has been modeled as retryable."""
-
- def __init__(self):
- self._error_detector = ModeledRetryErrorDetector()
-
- def is_retryable(self, context):
- error_code = context.get_error_code()
- if error_code is None:
- return False
- return self._error_detector.detect_error_type(context) is not None
-
-
-class ModeledRetryErrorDetector:
- """Checks whether or not an error is a modeled retryable error."""
-
- # These are the return values from the detect_error_type() method.
- TRANSIENT_ERROR = 'TRANSIENT_ERROR'
- THROTTLING_ERROR = 'THROTTLING_ERROR'
- # This class is lower level than ModeledRetryableChecker, which
- # implements BaseRetryableChecker. This object allows you to distinguish
- # between the various types of retryable errors.
-
- def detect_error_type(self, context):
- """Detect the error type associated with an error code and model.
-
- This will either return:
-
- * ``self.TRANSIENT_ERROR`` - If the error is a transient error
- * ``self.THROTTLING_ERROR`` - If the error is a throttling error
- * ``None`` - If the error is neither type of error.
-
- """
- error_code = context.get_error_code()
- op_model = context.operation_model
- if op_model is None or not op_model.error_shapes:
- return
- for shape in op_model.error_shapes:
- if shape.metadata.get('retryable') is not None:
- # Check if this error code matches the shape. This can
- # be either by name or by a modeled error code.
- error_code_to_check = (
- shape.metadata.get('error', {}).get('code') or shape.name
- )
- if error_code == error_code_to_check:
- if shape.metadata['retryable'].get('throttling'):
- return self.THROTTLING_ERROR
- return self.TRANSIENT_ERROR
-
-
-class ThrottlingErrorDetector:
- def __init__(self, retry_event_adapter):
- self._modeled_error_detector = ModeledRetryErrorDetector()
- self._fixed_error_code_detector = ThrottledRetryableChecker()
- self._retry_event_adapter = retry_event_adapter
-
- # This expects the kwargs from needs-retry to be passed through.
- def is_throttling_error(self, **kwargs):
- context = self._retry_event_adapter.create_retry_context(**kwargs)
- if self._fixed_error_code_detector.is_retryable(context):
- return True
- error_type = self._modeled_error_detector.detect_error_type(context)
- return error_type == self._modeled_error_detector.THROTTLING_ERROR
-
-
-class StandardRetryConditions(BaseRetryableChecker):
- """Concrete class that implements the standard retry policy checks.
-
- Specifically:
-
- not max_attempts and (transient or throttled or modeled_retry)
-
- """
-
- def __init__(self, max_attempts=DEFAULT_MAX_ATTEMPTS):
- # Note: This class is for convenience so you can have the
- # standard retry condition in a single class.
- self._max_attempts_checker = MaxAttemptsChecker(max_attempts)
- self._additional_checkers = OrRetryChecker(
- [
- TransientRetryableChecker(),
- ThrottledRetryableChecker(),
- ModeledRetryableChecker(),
- OrRetryChecker(
- [
- special.RetryIDPCommunicationError(),
- special.RetryDDBChecksumError(),
- ]
- ),
- ]
- )
-
- def is_retryable(self, context):
- return self._max_attempts_checker.is_retryable(
- context
- ) and self._additional_checkers.is_retryable(context)
-
-
-class OrRetryChecker(BaseRetryableChecker):
- def __init__(self, checkers):
- self._checkers = checkers
-
- def is_retryable(self, context):
- return any(checker.is_retryable(context) for checker in self._checkers)
-
-
-class RetryQuotaChecker:
- _RETRY_COST = 5
- _NO_RETRY_INCREMENT = 1
- _TIMEOUT_RETRY_REQUEST = 10
- _TIMEOUT_EXCEPTIONS = (ConnectTimeoutError, ReadTimeoutError)
-
- # Implementation note: We're not making this a BaseRetryableChecker
- # because this isn't just a check if we can retry. This also changes
- # state so we have to be careful when/how we call this. Making it
- # a BaseRetryableChecker implies you can call .is_retryable(context)
- # as many times as you want and not affect anything.
-
- def __init__(self, quota):
- self._quota = quota
- # This tracks the last amount of capacity acquired from the quota.
- self._last_amount_acquired = None
-
- def acquire_retry_quota(self, context):
- if self._is_timeout_error(context):
- capacity_amount = self._TIMEOUT_RETRY_REQUEST
- else:
- capacity_amount = self._RETRY_COST
- success = self._quota.acquire(capacity_amount)
- if success:
- # We add the capacity amount to the request context so we know
- # how much to release later. The capacity amount can vary based
- # on the error.
- context.request_context['retry_quota_capacity'] = capacity_amount
- return True
- context.add_retry_metadata(RetryQuotaReached=True)
- return False
-
- def _is_timeout_error(self, context):
- return isinstance(context.caught_exception, self._TIMEOUT_EXCEPTIONS)
-
- # This is intended to be hooked up to ``after-call``.
- def release_retry_quota(self, context, http_response, **kwargs):
- # There are three possible options.
- # 1. The HTTP response did not have a 2xx response. In that case we
- # give no quota back.
- # 2. The HTTP request was successful and was never retried. In
- # that case we give _NO_RETRY_INCREMENT back.
- # 3. The API call had retries, and we eventually receive an HTTP
- # response with a 2xx status code. In that case we give back
- # whatever quota was associated with the last acquisition.
- if http_response is None:
- return
- status_code = http_response.status_code
- if 200 <= status_code < 300:
- if 'retry_quota_capacity' not in context:
- self._quota.release(self._NO_RETRY_INCREMENT)
- else:
- capacity_amount = context['retry_quota_capacity']
- self._quota.release(capacity_amount)
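The delay formula documented in `ExponentialBackoff.delay_amount` above is easy to sanity-check on its own. The sketch below reimplements just that formula outside botocore; the constants mirror the class attributes shown above, and the seeded RNG is purely so the illustration is repeatable.

```python
import random

BASE = 2           # mirrors ExponentialBackoff._BASE
MAX_BACKOFF = 20   # mirrors ExponentialBackoff._MAX_BACKOFF


def delay_amount(attempt_number, rng=random.random):
    # attempt_number is 1-based, so the first retry sleeps rand(0, 1) seconds
    # and later retries grow as rand(0, 1) * 2**i, truncated at MAX_BACKOFF.
    return min(rng() * (BASE ** (attempt_number - 1)), MAX_BACKOFF)


random.seed(0)  # illustrative only
for attempt in range(1, 6):
    print(attempt, round(delay_amount(attempt), 3))
```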
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel.py
deleted file mode 100644
index 064811ad11bb07b2b7bc8e30ec6c03f21997d6b2..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import logging
-import os
-from typing import Optional
-
-from pip._vendor.pyproject_hooks import BuildBackendHookCaller
-
-from pip._internal.utils.subprocess import runner_with_spinner_message
-
-logger = logging.getLogger(__name__)
-
-
-def build_wheel_pep517(
- name: str,
- backend: BuildBackendHookCaller,
- metadata_directory: str,
- tempd: str,
-) -> Optional[str]:
- """Build one InstallRequirement using the PEP 517 build process.
-
- Returns path to wheel if successfully built. Otherwise, returns None.
- """
- assert metadata_directory is not None
- try:
- logger.debug("Destination directory: %s", tempd)
-
- runner = runner_with_spinner_message(
- f"Building wheel for {name} (pyproject.toml)"
- )
- with backend.subprocess_runner(runner):
- wheel_name = backend.build_wheel(
- tempd,
- metadata_directory=metadata_directory,
- )
- except Exception:
- logger.error("Failed building wheel for %s", name)
- return None
- return os.path.join(tempd, wheel_name)
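For context on what `build_wheel_pep517` delegates to, here is a minimal sketch of invoking the same PEP 517 `build_wheel` hook directly via `pyproject_hooks` (the standalone package that pip vendors), without pip's spinner or error handling. The `./pkg` path and the `setuptools.build_meta` backend are assumptions for illustration; pip also passes a previously prepared `metadata_directory`, which the hook does not strictly require when driven by hand like this.

```python
import os
import tempfile

from pyproject_hooks import BuildBackendHookCaller, default_subprocess_runner

source_dir = "./pkg"  # hypothetical project containing a pyproject.toml
backend = BuildBackendHookCaller(
    source_dir, "setuptools.build_meta", runner=default_subprocess_runner
)

with tempfile.TemporaryDirectory() as tempd:
    # The hook runs in a subprocess and returns the wheel's file name.
    wheel_name = backend.build_wheel(tempd)
    print("built", os.path.join(tempd, wheel_name))
```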
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build.py
deleted file mode 100644
index 6d453419d073677e33ab60a5e627ba412be1fd6a..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""distutils.command.build
-
-Implements the Distutils 'build' command."""
-
-import sys
-import os
-from distutils.core import Command
-from distutils.errors import DistutilsOptionError
-from distutils.util import get_platform
-
-
-def show_compilers():
- from distutils.ccompiler import show_compilers
-
- show_compilers()
-
-
-class build(Command):
-
- description = "build everything needed to install"
-
- user_options = [
- ('build-base=', 'b', "base directory for build library"),
- ('build-purelib=', None, "build directory for platform-neutral distributions"),
- ('build-platlib=', None, "build directory for platform-specific distributions"),
- (
- 'build-lib=',
- None,
- "build directory for all distribution (defaults to either "
- + "build-purelib or build-platlib",
- ),
- ('build-scripts=', None, "build directory for scripts"),
- ('build-temp=', 't', "temporary build directory"),
- (
- 'plat-name=',
- 'p',
- "platform name to build for, if supported "
- "(default: %s)" % get_platform(),
- ),
- ('compiler=', 'c', "specify the compiler type"),
- ('parallel=', 'j', "number of parallel build jobs"),
- ('debug', 'g', "compile extensions and libraries with debugging information"),
- ('force', 'f', "forcibly build everything (ignore file timestamps)"),
- ('executable=', 'e', "specify final destination interpreter path (build.py)"),
- ]
-
- boolean_options = ['debug', 'force']
-
- help_options = [
- ('help-compiler', None, "list available compilers", show_compilers),
- ]
-
- def initialize_options(self):
- self.build_base = 'build'
- # these are decided only after 'build_base' has its final value
- # (unless overridden by the user or client)
- self.build_purelib = None
- self.build_platlib = None
- self.build_lib = None
- self.build_temp = None
- self.build_scripts = None
- self.compiler = None
- self.plat_name = None
- self.debug = None
- self.force = 0
- self.executable = None
- self.parallel = None
-
- def finalize_options(self): # noqa: C901
- if self.plat_name is None:
- self.plat_name = get_platform()
- else:
- # plat-name only supported for windows (other platforms are
- # supported via ./configure flags, if at all). Avoid misleading
- # other platforms.
- if os.name != 'nt':
- raise DistutilsOptionError(
- "--plat-name only supported on Windows (try "
- "using './configure --help' on your platform)"
- )
-
- plat_specifier = ".{}-{}".format(self.plat_name, sys.implementation.cache_tag)
-
- # Make it so a regular build and a --with-pydebug build of the same
- # Python version don't share the same build directories. Doing so
- # confuses the build process for C modules.
- if hasattr(sys, 'gettotalrefcount'):
- plat_specifier += '-pydebug'
-
- # 'build_purelib' and 'build_platlib' just default to 'lib' and
- # 'lib.<plat>' under the base build directory. We only use one of
- # them for a given distribution, though --
- if self.build_purelib is None:
- self.build_purelib = os.path.join(self.build_base, 'lib')
- if self.build_platlib is None:
- self.build_platlib = os.path.join(self.build_base, 'lib' + plat_specifier)
-
- # 'build_lib' is the actual directory that we will use for this
- # particular module distribution -- if user didn't supply it, pick
- # one of 'build_purelib' or 'build_platlib'.
- if self.build_lib is None:
- if self.distribution.has_ext_modules():
- self.build_lib = self.build_platlib
- else:
- self.build_lib = self.build_purelib
-
- # 'build_temp' -- temporary directory for compiler turds,
- # "build/temp."
- if self.build_temp is None:
- self.build_temp = os.path.join(self.build_base, 'temp' + plat_specifier)
- if self.build_scripts is None:
- self.build_scripts = os.path.join(
- self.build_base, 'scripts-%d.%d' % sys.version_info[:2]
- )
-
- if self.executable is None and sys.executable:
- self.executable = os.path.normpath(sys.executable)
-
- if isinstance(self.parallel, str):
- try:
- self.parallel = int(self.parallel)
- except ValueError:
- raise DistutilsOptionError("parallel should be an integer")
-
- def run(self):
- # Run all relevant sub-commands. This will be some subset of:
- # - build_py - pure Python modules
- # - build_clib - standalone C libraries
- # - build_ext - Python extensions
- # - build_scripts - (Python) scripts
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- # -- Predicates for the sub-command list ---------------------------
-
- def has_pure_modules(self):
- return self.distribution.has_pure_modules()
-
- def has_c_libraries(self):
- return self.distribution.has_c_libraries()
-
- def has_ext_modules(self):
- return self.distribution.has_ext_modules()
-
- def has_scripts(self):
- return self.distribution.has_scripts()
-
- sub_commands = [
- ('build_py', has_pure_modules),
- ('build_clib', has_c_libraries),
- ('build_ext', has_ext_modules),
- ('build_scripts', has_scripts),
- ]
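The directory defaults computed in `finalize_options` above can be previewed with a few lines that reuse the same expressions; the exact output depends on the platform and interpreter, and `distutils` itself is only importable on interpreters that still ship it.

```python
import os
import sys
from distutils.util import get_platform

build_base = 'build'
plat_specifier = ".{}-{}".format(get_platform(), sys.implementation.cache_tag)

print(os.path.join(build_base, 'lib'))                    # build_purelib
print(os.path.join(build_base, 'lib' + plat_specifier))   # build_platlib
print(os.path.join(build_base, 'temp' + plat_specifier))  # build_temp
print(os.path.join(build_base, 'scripts-%d.%d' % sys.version_info[:2]))
```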
diff --git a/spaces/CALM/Dashboard/streamlit_observable/frontend/src/Observable.tsx b/spaces/CALM/Dashboard/streamlit_observable/frontend/src/Observable.tsx
deleted file mode 100644
index f90267fc0c1597f858c36aea88c01269230c8dab..0000000000000000000000000000000000000000
--- a/spaces/CALM/Dashboard/streamlit_observable/frontend/src/Observable.tsx
+++ /dev/null
@@ -1,161 +0,0 @@
-import React, { ReactNode } from "react"
-import {
- withStreamlitConnection,
- StreamlitComponentBase,
- Streamlit,
-} from "./streamlit"
-import { Runtime, Inspector } from "@observablehq/runtime";
-
-class Observable extends StreamlitComponentBase<{}> {
- public observeValue = {};
- private notebookRef = React.createRef<HTMLDivElement>();
- private runtime: any = null;
- private main: any = null;
-
- componentWillUnmount() {
- this.runtime?.dispose();
- }
- // @ts-ignore
- public componentDidUpdate(prevProps: any) {
- const { args: prevArgs } = prevProps;
- if (prevArgs.notebook !== this.props.args.notebook) {
- // TODO handle new notebook
- }
- console.log('this.props.args.redefine: ', this.props.args.redefine);
- if (this.main !== null) {
- this.redefineCells(this.main, this.props.args.redefine);
- }
- }
-
- async embedNotebook(notebook: string, targets: string[], observe: string[], hide:string[]) {
- if (this.runtime) {
- this.runtime.dispose();
- }
-
- console.log('Console says hi!');
-
- const targetSet = new Set(targets);
- const observeSet = new Set(observe);
- const hideSet = new Set(hide);
- this.runtime = new Runtime();
- const { default: define } = await eval(`import("https://api.observablehq.com/${notebook}.js?v=3")`);
-
- this.main = this.runtime.module(define, (name: string) => {
- console.log('name: ', name);
- console.log('observeSet.has(name): ', observeSet.has(name));
- console.log('targetSet.has(name): ', targetSet.has(name));
- if (observeSet.has(name) && !targetSet.has(name)) {
- const observeValue = this.observeValue;
-
- console.log('observeValue: ', observeValue);
-
- return {
- fulfilled: (value: any) => {
- //@ts-ignore
- observeValue[name] = value;
- //@ts-ignore
- Streamlit.setComponentValue(observeValue);
- }
- }
- }
- if (targetSet.size > 0 && !targetSet.has(name)) return;
- if(hideSet.has(name)) return true;
- const el = document.createElement('div');
- this.notebookRef.current?.appendChild(el);
-
- const i = new Inspector(el);
- el.addEventListener('input', e => {
- Streamlit.setFrameHeight();
- })
- return {
- pending() {
- i.pending();
- Streamlit.setFrameHeight();
- },
- fulfilled(value: any) {
- i.fulfilled(value);
- Streamlit.setFrameHeight();
- },
- rejected(error: any) {
- i.rejected(error);
- Streamlit.setFrameHeight();
- },
- };
- });
- if (observeSet.size > 0) {
- Promise.all(Array.from(observeSet).map(async name => [name, await this.main.value(name)])).then(initial => {
- for (const [name, value] of initial) {
- // @ts-ignore
- this.observeValue[name] = value
- };
- Streamlit.setComponentValue(this.observeValue);
- })
- }
- }
-
- redefineCells(main: any, redefine = {}) {
-
- console.log('Console says hi 2 !');
-
- for (let cell in redefine) {
- //@ts-ignore
- main.redefine(cell, redefine[cell]);
- }
- }
- componentDidMount() {
- const { notebook, targets = [], observe = [], redefine = {} , hide=[]} = this.props.args;
- Streamlit.setComponentValue(this.observeValue);
- this.embedNotebook(notebook, targets, observe, hide).then(() => {
- this.redefineCells(this.main, redefine);
- });
-
- }
-
- public render = (): ReactNode => {
-
- console.log('this.props.args.render_empty: ', this.props.args.render_empty);
- if (this.props.args.render_empty) {
- return (
-
-
-
-
-
-
{this.props.args.name}
-
-
-
-
- )
- }
- return (
-
-
-
-
-
-
{this.props.args.name}
-
-
-
-
- )
- }
-}
-
-export default withStreamlitConnection(Observable)
diff --git a/spaces/ChrisCaviar/ControlNet-v1-1/app_mlsd.py b/spaces/ChrisCaviar/ControlNet-v1-1/app_mlsd.py
deleted file mode 100644
index 9440f2f480b3713aa081258909221eab792157b5..0000000000000000000000000000000000000000
--- a/spaces/ChrisCaviar/ControlNet-v1-1/app_mlsd.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-
-import gradio as gr
-
-from utils import randomize_seed_fn
-
-
-def create_demo(process, max_images=12, default_num_images=3):
- with gr.Blocks() as demo:
- with gr.Row():
- with gr.Column():
- image = gr.Image()
- prompt = gr.Textbox(label='Prompt')
- run_button = gr.Button('Run')
- with gr.Accordion('Advanced options', open=False):
- num_samples = gr.Slider(label='Number of images',
- minimum=1,
- maximum=max_images,
- value=default_num_images,
- step=1)
- image_resolution = gr.Slider(label='Image resolution',
- minimum=256,
- maximum=512,
- value=512,
- step=256)
- preprocess_resolution = gr.Slider(
- label='Preprocess resolution',
- minimum=128,
- maximum=512,
- value=512,
- step=1)
- mlsd_value_threshold = gr.Slider(
- label='Hough value threshold (MLSD)',
- minimum=0.01,
- maximum=2.0,
- value=0.1,
- step=0.01)
- mlsd_distance_threshold = gr.Slider(
- label='Hough distance threshold (MLSD)',
- minimum=0.01,
- maximum=20.0,
- value=0.1,
- step=0.01)
- num_steps = gr.Slider(label='Number of steps',
- minimum=1,
- maximum=100,
- value=20,
- step=1)
- guidance_scale = gr.Slider(label='Guidance scale',
- minimum=0.1,
- maximum=30.0,
- value=9.0,
- step=0.1)
- seed = gr.Slider(label='Seed',
- minimum=0,
- maximum=1000000,
- step=1,
- value=0,
- randomize=True)
- randomize_seed = gr.Checkbox(label='Randomize seed',
- value=True)
- a_prompt = gr.Textbox(
- label='Additional prompt',
- value='best quality, extremely detailed')
- n_prompt = gr.Textbox(
- label='Negative prompt',
- value=
- 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
- )
- with gr.Column():
- result = gr.Gallery(label='Output', show_label=False).style(
- columns=2, object_fit='scale-down')
- inputs = [
- image,
- prompt,
- a_prompt,
- n_prompt,
- num_samples,
- image_resolution,
- preprocess_resolution,
- num_steps,
- guidance_scale,
- seed,
- mlsd_value_threshold,
- mlsd_distance_threshold,
- ]
- prompt.submit(
- fn=randomize_seed_fn,
- inputs=[seed, randomize_seed],
- outputs=seed,
- ).then(
- fn=process,
- inputs=inputs,
- outputs=result,
- )
- run_button.click(
- fn=randomize_seed_fn,
- inputs=[seed, randomize_seed],
- outputs=seed,
- ).then(
- fn=process,
- inputs=inputs,
- outputs=result,
- api_name='mlsd',
- )
- return demo
-
-
-if __name__ == '__main__':
- from model import Model
- model = Model(task_name='MLSD')
- demo = create_demo(model.process_mlsd)
- demo.queue().launch()
diff --git a/spaces/Cloudfeng/anime-remove-background/README.md b/spaces/Cloudfeng/anime-remove-background/README.md
deleted file mode 100644
index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000
--- a/spaces/Cloudfeng/anime-remove-background/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Anime Remove Background
-emoji: 🪄🖼️
-colorFrom: indigo
-colorTo: pink
-sdk: gradio
-sdk_version: 3.1.4
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: skytnt/anime-remove-background
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/CofAI/chat.b4/client/css/global.css b/spaces/CofAI/chat.b4/client/css/global.css
deleted file mode 100644
index 8de755e9df1b2c4ee74d18f00ce717b22c69db4b..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat.b4/client/css/global.css
+++ /dev/null
@@ -1,70 +0,0 @@
-@import url("https://fonts.googleapis.com/css2?family=Inter:wght@100;200;300;400;500;600;700;800;900&display=swap");
-* {
- --font-1: "Inter", sans-serif;
- --section-gap: 24px;
- --border-radius-1: 8px;
- margin: 0;
- padding: 0;
- box-sizing: border-box;
- position: relative;
- font-family: var(--font-1);
-}
-
-.theme-light {
- --colour-1: #f5f5f5;
- --colour-2: #000000;
- --colour-3: #474747;
- --colour-4: #949494;
- --colour-5: #ebebeb;
- --colour-6: #dadada;
-
- --accent: #3a3a3a;
- --blur-bg: #ffffff;
- --blur-border: #dbdbdb;
- --user-input: #282828;
- --conversations: #666666;
-}
-
-.theme-dark {
- --colour-1: #181818;
- --colour-2: #ccc;
- --colour-3: #dadada;
- --colour-4: #f0f0f0;
- --colour-5: #181818;
- --colour-6: #242424;
-
- --accent: #151718;
- --blur-bg: #242627;
- --blur-border: #242627;
- --user-input: #f5f5f5;
- --conversations: #555555;
-}
-
-html,
-body {
- background: var(--colour-1);
- color: var(--colour-3);
-}
-
-ol,
-ul {
- padding-left: 20px;
-}
-
-.shown {
- display: flex !important;
-}
-
-a:-webkit-any-link {
- color: var(--accent);
-}
-
-pre {
- white-space: pre-wrap;
-}
-
-@media screen and (max-height: 720px) {
- :root {
- --section-gap: 16px;
- }
-}
diff --git a/spaces/CoreyMorris/MMLU-by-task-Leaderboard/README.md b/spaces/CoreyMorris/MMLU-by-task-Leaderboard/README.md
deleted file mode 100644
index 1c0d67c576902ec7ac65ea5164a07b63ac475b3f..0000000000000000000000000000000000000000
--- a/spaces/CoreyMorris/MMLU-by-task-Leaderboard/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: MMLU By Task Leaderboard
-emoji: 🏆
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: app.py
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Cpp4App/Cpp4App/CDM/result_processing/eval_classes.py b/spaces/Cpp4App/Cpp4App/CDM/result_processing/eval_classes.py
deleted file mode 100644
index 64f78e231017574ad0002c002e04178c288fd28f..0000000000000000000000000000000000000000
--- a/spaces/Cpp4App/Cpp4App/CDM/result_processing/eval_classes.py
+++ /dev/null
@@ -1,215 +0,0 @@
-import json
-import numpy as np
-import cv2
-from glob import glob
-from os.path import join as pjoin
-from tqdm import tqdm
-
-class_map = {'0':'Button', '1':'CheckBox', '2':'Chronometer', '3':'EditText', '4':'ImageButton', '5':'ImageView',
- '6':'ProgressBar', '7':'RadioButton', '8':'RatingBar', '9':'SeekBar', '10':'Spinner', '11':'Switch',
- '12':'ToggleButton', '13':'VideoView', '14':'TextView'}
-
-
-def resize_label(bboxes, d_height, gt_height, bias=0):
- bboxes_new = []
- scale = gt_height / d_height
- for bbox in bboxes:
- bbox = [int(b * scale + bias) for b in bbox]
- bboxes_new.append(bbox)
- return bboxes_new
-
-
-def draw_bounding_box(org, corners, color=(0, 255, 0), line=2, show=False):
- board = org.copy()
- for i in range(len(corners)):
- board = cv2.rectangle(board, (corners[i][0], corners[i][1]), (corners[i][2], corners[i][3]), color, line)
- if show:
- cv2.imshow('a', cv2.resize(board, (500, 1000)))
- cv2.waitKey(0)
- return board
-
-
-def load_detect_result_json(reslut_file_root, shrink=4):
- def is_bottom_or_top(corner):
- column_min, row_min, column_max, row_max = corner
- if row_max < 36 or row_min > 725:
- return True
- return False
-
- result_files = glob(pjoin(reslut_file_root, '*.json'))
- compos_reform = {}
- print('Loading %d detection results' % len(result_files))
- for reslut_file in tqdm(result_files):
- img_name = reslut_file.split('\\')[-1].split('.')[0]
- compos = json.load(open(reslut_file, 'r'))['compos']
- for compo in compos:
- if compo['column_max'] - compo['column_min'] < 10 or compo['row_max'] - compo['row_min'] < 10:
- continue
- if is_bottom_or_top((compo['column_min'], compo['row_min'], compo['column_max'], compo['row_max'])):
- continue
- if img_name not in compos_reform:
- compos_reform[img_name] = {'bboxes': [[compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink]],
- 'categories': [compo['category']]}
- else:
- compos_reform[img_name]['bboxes'].append([compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink])
- compos_reform[img_name]['categories'].append(compo['category'])
- return compos_reform
-
-
-def load_ground_truth_json(gt_file):
- def get_img_by_id(img_id):
- for image in images:
- if image['id'] == img_id:
- return image['file_name'].split('/')[-1][:-4], (image['height'], image['width'])
-
- def cvt_bbox(bbox):
- '''
- :param bbox: [x,y,width,height]
- :return: [col_min, row_min, col_max, row_max]
- '''
- bbox = [int(b) for b in bbox]
- return [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]
-
- data = json.load(open(gt_file, 'r'))
- images = data['images']
- annots = data['annotations']
- compos = {}
- print('Loading %d ground truth' % len(annots))
- for annot in tqdm(annots):
- img_name, size = get_img_by_id(annot['image_id'])
- if img_name not in compos:
- compos[img_name] = {'bboxes': [cvt_bbox(annot['bbox'])], 'categories': [class_map[str(annot['category_id'])]], 'size': size}
- else:
- compos[img_name]['bboxes'].append(cvt_bbox(annot['bbox']))
- compos[img_name]['categories'].append(class_map[str(annot['category_id'])])
- return compos
-
-
-def eval(detection, ground_truth, img_root, show=True, no_text=False, only_text=False):
- def compo_filter(compos, flag):
- if not no_text and not only_text:
- return compos
- compos_new = {'bboxes': [], 'categories': []}
- for k, category in enumerate(compos['categories']):
- if only_text:
- if flag == 'det' and category != 'TextView':
- continue
- if flag == 'gt' and category != 'TextView':
- continue
- elif no_text:
- if flag == 'det' and category == 'TextView':
- continue
- if flag == 'gt' and category == 'TextView':
- continue
-
- compos_new['bboxes'].append(compos['bboxes'][k])
- compos_new['categories'].append(category)
- return compos_new
-
- def match(org, d_bbox, d_category, gt_compos, matched):
- '''
- :param matched: mark if the ground truth component is matched
- :param d_bbox: [col_min, row_min, col_max, row_max]
- :param gt_bboxes: list of ground truth [[col_min, row_min, col_max, row_max]]
- :return: Boolean: if IOU large enough or detected box is contained by ground truth
- '''
- area_d = (d_bbox[2] - d_bbox[0]) * (d_bbox[3] - d_bbox[1])
- gt_bboxes = gt_compos['bboxes']
- gt_categories = gt_compos['categories']
- for i, gt_bbox in enumerate(gt_bboxes):
- if matched[i] == 0:
- continue
- area_gt = (gt_bbox[2] - gt_bbox[0]) * (gt_bbox[3] - gt_bbox[1])
- col_min = max(d_bbox[0], gt_bbox[0])
- row_min = max(d_bbox[1], gt_bbox[1])
- col_max = min(d_bbox[2], gt_bbox[2])
- row_max = min(d_bbox[3], gt_bbox[3])
- # if not intersected, area intersection should be 0
- w = max(0, col_max - col_min)
- h = max(0, row_max - row_min)
- area_inter = w * h
- if area_inter == 0:
- continue
- iod = area_inter / area_d
- iou = area_inter / (area_d + area_gt - area_inter)
- # if show:
- # cv2.putText(org, (str(round(iou, 2)) + ',' + str(round(iod, 2))), (d_bbox[0], d_bbox[1]),
- # cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
-
- if iou > 0.9 or iod == 1:
- if d_category == gt_categories[i]:
- matched[i] = 0
- return True
- return False
-
- amount = len(detection)
- TP, FP, FN = 0, 0, 0
- pres, recalls, f1s = [], [], []
- for i, image_id in enumerate(detection):
- TP_this, FP_this, FN_this = 0, 0, 0
- img = cv2.imread(pjoin(img_root, image_id + '.jpg'))
- d_compos = detection[image_id]
- if image_id not in ground_truth:
- continue
- gt_compos = ground_truth[image_id]
-
- org_height = gt_compos['size'][0]
-
- d_compos = compo_filter(d_compos, 'det')
- gt_compos = compo_filter(gt_compos, 'gt')
-
- d_compos['bboxes'] = resize_label(d_compos['bboxes'], 800, org_height)
- matched = np.ones(len(gt_compos['bboxes']), dtype=int)
- for j, d_bbox in enumerate(d_compos['bboxes']):
- if match(img, d_bbox, d_compos['categories'][j], gt_compos, matched):
- TP += 1
- TP_this += 1
- else:
- FP += 1
- FP_this += 1
- FN += sum(matched)
- FN_this = sum(matched)
-
- try:
- pre_this = TP_this / (TP_this + FP_this)
- recall_this = TP_this / (TP_this + FN_this)
- f1_this = 2 * (pre_this * recall_this) / (pre_this + recall_this)
- except:
- print('empty')
- continue
-
- pres.append(pre_this)
- recalls.append(recall_this)
- f1s.append(f1_this)
- if show:
- print(image_id + '.jpg')
- print('[%d/%d] TP:%d, FP:%d, FN:%d, Precision:%.3f, Recall:%.3f' % (
- i, amount, TP_this, FP_this, FN_this, pre_this, recall_this))
- # cv2.imshow('org', cv2.resize(img, (500, 1000)))
- broad = draw_bounding_box(img, d_compos['bboxes'], color=(255, 0, 0), line=3)
- draw_bounding_box(broad, gt_compos['bboxes'], color=(0, 0, 255), show=True, line=2)
-
- if i % 200 == 0:
- precision = TP / (TP + FP)
- recall = TP / (TP + FN)
- f1 = 2 * (precision * recall) / (precision + recall)
- print(
- '[%d/%d] TP:%d, FP:%d, FN:%d, Precision:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))
-
- precision = TP / (TP + FP)
- recall = TP / (TP + FN)
- f1 = 2 * (precision * recall) / (precision + recall)
- print('[%d/%d] TP:%d, FP:%d, FN:%d, Precision:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))
- # print("Average precision:%.4f; Average recall:%.3f" % (sum(pres)/len(pres), sum(recalls)/len(recalls)))
-
- return pres, recalls, f1s
-
-
-no_text = True
-only_text = False
-
-# detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_cls\\ip')
-# detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_cls\\merge')
-detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_v3\\merge')
-# detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_v3\\ocr')
-gt = load_ground_truth_json('E:\\Mulong\\Datasets\\rico\\instances_test.json')
-eval(detect, gt, 'E:\\Mulong\\Datasets\\rico\\combined', show=False, no_text=no_text, only_text=only_text)
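As a quick illustration of the matching rule in `match()` above (a detection is accepted when `iou > 0.9` or `iod == 1` and the categories agree), here is the same arithmetic on two made-up boxes in `[col_min, row_min, col_max, row_max]` form.

```python
d_bbox = [10, 10, 110, 60]   # detected box, area 100 * 50 = 5000
gt_bbox = [20, 15, 120, 65]  # ground-truth box, area 100 * 50 = 5000

area_d = (d_bbox[2] - d_bbox[0]) * (d_bbox[3] - d_bbox[1])
area_gt = (gt_bbox[2] - gt_bbox[0]) * (gt_bbox[3] - gt_bbox[1])

# intersection rectangle (zero area if the boxes do not overlap)
w = max(0, min(d_bbox[2], gt_bbox[2]) - max(d_bbox[0], gt_bbox[0]))
h = max(0, min(d_bbox[3], gt_bbox[3]) - max(d_bbox[1], gt_bbox[1]))
area_inter = w * h                                    # 90 * 45 = 4050

iod = area_inter / area_d                             # 0.81
iou = area_inter / (area_d + area_gt - area_inter)    # ~0.68
print(iou, iod)  # neither iou > 0.9 nor iod == 1, so this pair would not match
```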
diff --git a/spaces/DaFujaTyping/hf-Chat-ui/src/lib/buildPrompt.ts b/spaces/DaFujaTyping/hf-Chat-ui/src/lib/buildPrompt.ts
deleted file mode 100644
index 605a415d0bb6ed6512be038a34405c4f0c456e45..0000000000000000000000000000000000000000
--- a/spaces/DaFujaTyping/hf-Chat-ui/src/lib/buildPrompt.ts
+++ /dev/null
@@ -1,30 +0,0 @@
-import type { BackendModel } from "./server/models";
-import type { Message } from "./types/Message";
-
-/**
- * Convert [{user: "assistant", content: "hi"}, {user: "user", content: "hello"}] to:
- *
- * <|assistant|>hi<|endoftext|><|prompter|>hello<|endoftext|><|assistant|>
- */
-export function buildPrompt(
- messages: Pick<Message, "from" | "content">[],
- model: BackendModel
-): string {
- const prompt =
- messages
- .map(
- (m) =>
- (m.from === "user"
- ? model.userMessageToken + m.content
- : model.assistantMessageToken + m.content) +
- (model.messageEndToken
- ? m.content.endsWith(model.messageEndToken)
- ? ""
- : model.messageEndToken
- : "")
- )
- .join("") + model.assistantMessageToken;
-
- // Not super precise, but it's truncated in the model's backend anyway
- return model.preprompt + prompt.split(" ").slice(-model.parameters.truncate).join(" ");
-}
diff --git a/spaces/Dagfinn1962/prodia2/main.css b/spaces/Dagfinn1962/prodia2/main.css
deleted file mode 100644
index 7351bd1ce70107d59a538d07a038593e1023c72c..0000000000000000000000000000000000000000
--- a/spaces/Dagfinn1962/prodia2/main.css
+++ /dev/null
@@ -1,67 +0,0 @@
-body {
- background: #FFFFFF;
-
- width: 100%;
- color: #FFFFFF;
- padding: 20px;
- border-radius: 10px;
- border: 1px solid #1b0202;
- }
-
-gr.blocks {
- background-color: #758bec;
- width: 100%;
- color: #FFFFFF;
-}
-h3 {
- background-color:#758bec;
- color: #FFFFFF;
- text-align: center;
- font-family: verdana;
- font-size: 24px;
- border: 1px solid #FFFFFF;
- border-radius: 10px;
-}
-
-p {
- font-family: verdana;
- font-size: 14px;
-}
-
-label {
- font-family: verdana;
- color: #FFB76B;
- font-weight: 700;
- font-size: 14px;
- border: 1px solid #000000;
-}
-
-gr.Textbox {
- font-family: verdana;
- background-color: #000000;
- color: #FFFFFF;
- font-weight: 700;
- font-size: 14px;
- border: 1px solid #FFFFFF;
- border-radius: 6px;
-}
-
-gr.Button {
- font-family: verdana;
- background-color: #758bec;
- color: #FFFFFF;
- font-weight: 700;
- font-size: 14px;
- border: 1px solid #000000;
- border-radius: 6px;
-}
-
-a, a:active, a:hover
- {
- font-family: verdana;
- color: #572430;
- text-decoration: none;
- font-weight: 700;
- font-size: 14px;
-
-}
\ No newline at end of file
diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/loss/led_loss.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/loss/led_loss.py
deleted file mode 100644
index 6d64700fe4796d50cb48122936bde23f66c86773..0000000000000000000000000000000000000000
--- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/loss/led_loss.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""
-@Date: 2021/08/12
-@description:
-"""
-import torch
-import torch.nn as nn
-
-
-class LEDLoss(nn.Module):
- def __init__(self):
- super().__init__()
- self.loss = nn.L1Loss()
-
- def forward(self, gt, dt):
- camera_height = 1.6
- gt_depth = gt['depth'] * camera_height
-
- dt_ceil_depth = dt['ceil_depth'] * camera_height * gt['ratio']
- dt_floor_depth = dt['depth'] * camera_height
-
- ceil_loss = self.loss(gt_depth, dt_ceil_depth)
- floor_loss = self.loss(gt_depth, dt_floor_depth)
-
- loss = floor_loss + ceil_loss
-
- return loss
-
-
-if __name__ == '__main__':
- import numpy as np
- from dataset.mp3d_dataset import MP3DDataset
-
- mp3d_dataset = MP3DDataset(root_dir='../src/dataset/mp3d', mode='train')
- gt = mp3d_dataset.__getitem__(0)
-
- gt['depth'] = torch.from_numpy(gt['depth'][np.newaxis]) # batch size is 1
- gt['ratio'] = torch.from_numpy(gt['ratio'][np.newaxis]) # batch size is 1
-
- dummy_dt = {
- 'depth': gt['depth'].clone(),
- 'ceil_depth': gt['depth'] / gt['ratio']
- }
- # dummy_dt['depth'][..., :20] *= 3 # some different
-
- led_loss = LEDLoss()
- loss = led_loss(gt, dummy_dt)
- print(loss)
diff --git a/spaces/EPFL-VILAB/MultiMAE/multimae/multimae_utils.py b/spaces/EPFL-VILAB/MultiMAE/multimae/multimae_utils.py
deleted file mode 100644
index a7e3035d29e42fc4e08bbda95ae02b97cd512fe0..0000000000000000000000000000000000000000
--- a/spaces/EPFL-VILAB/MultiMAE/multimae/multimae_utils.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright (c) EPFL VILAB.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-# --------------------------------------------------------
-# Based on timm, DeiT, DINO, MoCo-v3, BEiT, MAE-priv and MAE code bases
-# https://github.com/rwightman/pytorch-image-models/tree/master/timm
-# https://github.com/facebookresearch/deit
-# https://github.com/facebookresearch/dino
-# https://github.com/facebookresearch/moco-v3
-# https://github.com/microsoft/unilm/tree/master/beit
-# https://github.com/BUPT-PRIV/MAE-priv
-# https://github.com/facebookresearch/mae
-# --------------------------------------------------------
-
-import math
-import warnings
-
-import torch
-import torch.nn as nn
-from einops import rearrange
-
-
-def pair(t):
- return t if isinstance(t, tuple) else (t, t)
-
-
-def build_2d_sincos_posemb(h, w, embed_dim=1024, temperature=10000.):
- """Sine-cosine positional embeddings from MoCo-v3
-
- Source: https://github.com/facebookresearch/moco-v3/blob/main/vits.py
- """
- grid_w = torch.arange(w, dtype=torch.float32)
- grid_h = torch.arange(h, dtype=torch.float32)
- grid_w, grid_h = torch.meshgrid(grid_w, grid_h)
- assert embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
- pos_dim = embed_dim // 4
- omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
- omega = 1. / (temperature ** omega)
- out_w = torch.einsum('m,d->md', [grid_w.flatten(), omega])
- out_h = torch.einsum('m,d->md', [grid_h.flatten(), omega])
- pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1)[None, :, :]
- pos_emb = rearrange(pos_emb, 'b (h w) d -> b d h w', h=h, w=w, d=embed_dim)
- return pos_emb
-
-
-def _no_grad_trunc_normal_(tensor, mean, std, a, b):
- # Cut & paste from PyTorch official master until it's in a few official releases - RW
- # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
- def norm_cdf(x):
- # Computes standard normal cumulative distribution function
- return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
- if (mean < a - 2 * std) or (mean > b + 2 * std):
- warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
- "The distribution of values may be incorrect.",
- stacklevel=2)
-
- with torch.no_grad():
- # Values are generated by using a truncated uniform distribution and
- # then using the inverse CDF for the normal distribution.
- # Get upper and lower cdf values
- l = norm_cdf((a - mean) / std)
- u = norm_cdf((b - mean) / std)
-
- # Uniformly fill tensor with values from [l, u], then translate to
- # [2l-1, 2u-1].
- tensor.uniform_(2 * l - 1, 2 * u - 1)
-
- # Use inverse cdf transform for normal distribution to get truncated
- # standard normal
- tensor.erfinv_()
-
- # Transform to proper mean, std
- tensor.mul_(std * math.sqrt(2.))
- tensor.add_(mean)
-
- # Clamp to ensure it's in the proper range
- tensor.clamp_(min=a, max=b)
- return tensor
-
-
-def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
- # type: (Tensor, float, float, float, float) -> Tensor
- r"""Fills the input Tensor with values drawn from a truncated
- normal distribution. The values are effectively drawn from the
- normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
- with values outside :math:`[a, b]` redrawn until they are within
- the bounds. The method used for generating the random values works
- best when :math:`a \leq \text{mean} \leq b`.
- Args:
- tensor: an n-dimensional `torch.Tensor`
- mean: the mean of the normal distribution
- std: the standard deviation of the normal distribution
- a: the minimum cutoff value
- b: the maximum cutoff value
- Examples:
- >>> w = torch.empty(3, 5)
- >>> nn.init.trunc_normal_(w)
- """
- return _no_grad_trunc_normal_(tensor, mean, std, a, b)
-
-
-def drop_path(x, drop_prob: float = 0., training: bool = False):
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
- This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
- the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
- See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
- changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
- 'survival rate' as the argument.
- """
- if drop_prob == 0. or not training:
- return x
- keep_prob = 1 - drop_prob
- shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
- random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
- random_tensor.floor_() # binarize
- output = x.div(keep_prob) * random_tensor
- return output
-
-
-class DropPath(nn.Module):
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
- """
-
- def __init__(self, drop_prob=None):
- super(DropPath, self).__init__()
- self.drop_prob = drop_prob
-
- def forward(self, x):
- return drop_path(x, self.drop_prob, self.training)
-
- def extra_repr(self) -> str:
- return 'p={}'.format(self.drop_prob)
-
-
-class Mlp(nn.Module):
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- # x = self.drop(x)
- # dropout here is commented out to match the original BERT implementation
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-class Attention(nn.Module):
- def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.,):
- super().__init__()
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = head_dim ** -0.5
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
-
- def forward(self, x):
- B, N, C = x.shape
- qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
- q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
-
- attn = (q @ k.transpose(-2, -1)) * self.scale
- attn = attn.softmax(dim=-1)
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
-
-class CrossAttention(nn.Module):
- def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
- super().__init__()
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = head_dim ** -0.5
-
- self.q = nn.Linear(dim, dim, bias=qkv_bias)
- self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
-
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
-
- def forward(self, x, context):
- B, N, C = x.shape
- _, M, _ = context.shape
-
- q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
- kv = self.kv(context).reshape(B, M, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
- k, v = kv[0], kv[1]
-
- attn = (q @ k.transpose(-2, -1)) * self.scale
- attn = attn.softmax(dim=-1)
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
-
-class Block(nn.Module):
-
- def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.norm1 = norm_layer(dim)
- self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def forward(self, x):
- x = x + self.drop_path(self.attn(self.norm1(x)))
- x = x + self.drop_path(self.mlp(self.norm2(x)))
- return x
-
-
-class DecoderBlock(nn.Module):
- def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.norm1 = norm_layer(dim)
- self.self_attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
- self.cross_attn = CrossAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
- self.query_norm = norm_layer(dim)
- self.context_norm = norm_layer(dim)
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def forward(self, x, context):
- x = x + self.drop_path(self.self_attn(self.norm1(x)))
- x = x + self.drop_path(self.cross_attn(self.query_norm(x), self.context_norm(context)))
- x = x + self.drop_path(self.mlp(self.norm2(x)))
- return x
diff --git a/spaces/EXPOSUREEE/Ai-Image-Enhancer/scripts/generate_meta_info.py b/spaces/EXPOSUREEE/Ai-Image-Enhancer/scripts/generate_meta_info.py
deleted file mode 100644
index 9c3b7a37e85f534075c50e6c33d7cca999d8b836..0000000000000000000000000000000000000000
--- a/spaces/EXPOSUREEE/Ai-Image-Enhancer/scripts/generate_meta_info.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import argparse
-import cv2
-import glob
-import os
-
-
-def main(args):
- txt_file = open(args.meta_info, 'w')
- for folder, root in zip(args.input, args.root):
- img_paths = sorted(glob.glob(os.path.join(folder, '*')))
- for img_path in img_paths:
- status = True
- if args.check:
- # read the image once for check, as some images may have errors
- img = None
- try:
- img = cv2.imread(img_path)
- except (IOError, OSError) as error:
- print(f'Read {img_path} error: {error}')
- status = False
- if status and img is None:
- status = False
- print(f'Img is None: {img_path}')
- if status:
- # get the relative path
- img_name = os.path.relpath(img_path, root)
- print(img_name)
- txt_file.write(f'{img_name}\n')
-
-
-if __name__ == '__main__':
- """Generate meta info (txt file) for only Ground-Truth images.
-
- It can also generate meta info from several folders into one txt file.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--input',
- nargs='+',
- default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'],
- help='Input folder, can be a list')
- parser.add_argument(
- '--root',
- nargs='+',
- default=['datasets/DF2K', 'datasets/DF2K'],
- help='Folder root, should have the length as input folders')
- parser.add_argument(
- '--meta_info',
- type=str,
- default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt',
- help='txt path for meta info')
- parser.add_argument('--check', action='store_true', help='Read image to check whether it is ok')
- args = parser.parse_args()
-
- assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got '
- f'{len(args.input)} and {len(args.root)}.')
- os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
-
- main(args)
diff --git a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py b/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py
deleted file mode 100644
index 9b127bc6427f5c60c8cf85603a3d8a093c3501c4..0000000000000000000000000000000000000000
--- a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import torch
-import torch.nn.functional as F
-from torch import nn
-
-from . import spec_utils
-
-
-class Conv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(Conv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nout,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- bias=False,
- ),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class SeperableConv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(SeperableConv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nin,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- groups=nin,
- bias=False,
- ),
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class Encoder(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
- super(Encoder, self).__init__()
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
- def __call__(self, x):
- skip = self.conv1(x)
- h = self.conv2(skip)
-
- return h, skip
-
-
-class Decoder(nn.Module):
- def __init__(
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
- ):
- super(Decoder, self).__init__()
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.dropout = nn.Dropout2d(0.1) if dropout else None
-
- def __call__(self, x, skip=None):
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
- if skip is not None:
- skip = spec_utils.crop_center(skip, x)
- x = torch.cat([x, skip], dim=1)
- h = self.conv(x)
-
- if self.dropout is not None:
- h = self.dropout(h)
-
- return h
-
-
-class ASPPModule(nn.Module):
- def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
- super(ASPPModule, self).__init__()
- self.conv1 = nn.Sequential(
- nn.AdaptiveAvgPool2d((1, None)),
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
- )
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
- self.conv3 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
- )
- self.conv4 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
- )
- self.conv5 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.conv6 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.conv7 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.bottleneck = nn.Sequential(
- Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
- )
-
- def forward(self, x):
- _, _, h, w = x.size()
- feat1 = F.interpolate(
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
- )
- feat2 = self.conv2(x)
- feat3 = self.conv3(x)
- feat4 = self.conv4(x)
- feat5 = self.conv5(x)
- feat6 = self.conv6(x)
- feat7 = self.conv7(x)
- out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
- bottle = self.bottleneck(out)
- return bottle
diff --git a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/spec_utils.py b/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/spec_utils.py
deleted file mode 100644
index a9634fd51ff47bf90211839231774719154c37cf..0000000000000000000000000000000000000000
--- a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/spec_utils.py
+++ /dev/null
@@ -1,672 +0,0 @@
-import hashlib
-import json
-import math
-import os
-
-import librosa
-import numpy as np
-import soundfile as sf
-from tqdm import tqdm
-
-
-def crop_center(h1, h2):
- h1_shape = h1.size()
- h2_shape = h2.size()
-
- if h1_shape[3] == h2_shape[3]:
- return h1
- elif h1_shape[3] < h2_shape[3]:
- raise ValueError("h1_shape[3] must be greater than h2_shape[3]")
-
- # s_freq = (h2_shape[2] - h1_shape[2]) // 2
- # e_freq = s_freq + h1_shape[2]
- s_time = (h1_shape[3] - h2_shape[3]) // 2
- e_time = s_time + h2_shape[3]
- h1 = h1[:, :, :, s_time:e_time]
-
- return h1
-
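# Usage sketch for crop_center with made-up shapes: it center-crops the wider
# tensor's time axis (dim 3) so a skip connection can be concatenated with an
# upsampled feature map of matching width.
import torch

skip = torch.randn(1, 8, 32, 100)      # wider in time
up = torch.randn(1, 8, 32, 96)
cropped = crop_center(skip, up)
print(cropped.shape)                   # torch.Size([1, 8, 32, 96]): 2 frames dropped per side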
-
-def wave_to_spectrogram(
- wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
-):
- if reverse:
- wave_left = np.flip(np.asfortranarray(wave[0]))
- wave_right = np.flip(np.asfortranarray(wave[1]))
- elif mid_side:
- wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
- elif mid_side_b2:
- wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
- else:
- wave_left = np.asfortranarray(wave[0])
- wave_right = np.asfortranarray(wave[1])
-
- spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length)
- spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
-
- spec = np.asfortranarray([spec_left, spec_right])
-
- return spec
-
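# Illustrative call for wave_to_spectrogram. The sample rate, FFT size, and hop
# length are placeholders, and the positional n_fft inside the helper assumes
# the older librosa API this file was written against.
import numpy as np

wave = np.random.randn(2, 44100)                     # one second of stereo noise
spec = wave_to_spectrogram(wave, hop_length=512, n_fft=2048)
print(spec.shape)                                    # (2, 1025, n_frames), complex-valued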
-
-def wave_to_spectrogram_mt(
- wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
-):
- import threading
-
- if reverse:
- wave_left = np.flip(np.asfortranarray(wave[0]))
- wave_right = np.flip(np.asfortranarray(wave[1]))
- elif mid_side:
- wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
- elif mid_side_b2:
- wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
- else:
- wave_left = np.asfortranarray(wave[0])
- wave_right = np.asfortranarray(wave[1])
-
- def run_thread(**kwargs):
- global spec_left
- spec_left = librosa.stft(**kwargs)
-
- thread = threading.Thread(
- target=run_thread,
- kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
- )
- thread.start()
- spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
- thread.join()
-
- spec = np.asfortranarray([spec_left, spec_right])
-
- return spec
-
-
-def combine_spectrograms(specs, mp):
- l = min([specs[i].shape[2] for i in specs])
- spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64)
- offset = 0
- bands_n = len(mp.param["band"])
-
- for d in range(1, bands_n + 1):
- h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"]
- spec_c[:, offset : offset + h, :l] = specs[d][
- :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l
- ]
- offset += h
-
- if offset > mp.param["bins"]:
- raise ValueError("Too much bins")
-
- # lowpass fiter
- if (
- mp.param["pre_filter_start"] > 0
- ): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']:
- if bands_n == 1:
- spec_c = fft_lp_filter(
- spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"]
- )
- else:
- gp = 1
- for b in range(
- mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"]
- ):
- g = math.pow(
- 10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0
- )
- gp = g
- spec_c[:, b, :] *= g
-
- return np.asfortranarray(spec_c)
-
-
-def spectrogram_to_image(spec, mode="magnitude"):
- if mode == "magnitude":
- if np.iscomplexobj(spec):
- y = np.abs(spec)
- else:
- y = spec
- y = np.log10(y**2 + 1e-8)
- elif mode == "phase":
- if np.iscomplexobj(spec):
- y = np.angle(spec)
- else:
- y = spec
-
- y -= y.min()
- y *= 255 / y.max()
- img = np.uint8(y)
-
- if y.ndim == 3:
- img = img.transpose(1, 2, 0)
- img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2)
-
- return img
-
-
-def reduce_vocal_aggressively(X, y, softmask):
- v = X - y
- y_mag_tmp = np.abs(y)
- v_mag_tmp = np.abs(v)
-
- v_mask = v_mag_tmp > y_mag_tmp
- y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf)
-
- return y_mag * np.exp(1.0j * np.angle(y))
-
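# A tiny numeric sketch of the soft-mask rule in reduce_vocal_aggressively,
# using two made-up spectrogram bins: wherever the residual X - y is louder
# than y, y's magnitude is reduced by softmask * |residual|, floored at zero.
import numpy as np

X = np.array([1.0 + 0j, 0.2 + 0j])
y = np.array([0.4 + 0j, 0.15 + 0j])
out = reduce_vocal_aggressively(X, y, softmask=0.5)
print(np.abs(out))                     # [0.1 0.15]: only the first bin is attenuated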
-
-def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32):
- if min_range < fade_size * 2:
- raise ValueError("min_range must be >= fade_area * 2")
-
- mag = mag.copy()
-
- idx = np.where(ref.mean(axis=(0, 1)) < thres)[0]
- starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0])
- ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1])
- uninformative = np.where(ends - starts > min_range)[0]
- if len(uninformative) > 0:
- starts = starts[uninformative]
- ends = ends[uninformative]
- old_e = None
- for s, e in zip(starts, ends):
- if old_e is not None and s - old_e < fade_size:
- s = old_e - fade_size * 2
-
- if s != 0:
- weight = np.linspace(0, 1, fade_size)
- mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size]
- else:
- s -= fade_size
-
- if e != mag.shape[2]:
- weight = np.linspace(1, 0, fade_size)
- mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e]
- else:
- e += fade_size
-
- mag[:, :, s + fade_size : e - fade_size] += ref[
- :, :, s + fade_size : e - fade_size
- ]
- old_e = e
-
- return mag
-
-
-def align_wave_head_and_tail(a, b):
- l = min([a[0].size, b[0].size])
-
- return a[:l, :l], b[:l, :l]
-
-
-def cache_or_load(mix_path, inst_path, mp):
- mix_basename = os.path.splitext(os.path.basename(mix_path))[0]
- inst_basename = os.path.splitext(os.path.basename(inst_path))[0]
-
- cache_dir = "mph{}".format(
- hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest()
- )
- mix_cache_dir = os.path.join("cache", cache_dir)
- inst_cache_dir = os.path.join("cache", cache_dir)
-
- os.makedirs(mix_cache_dir, exist_ok=True)
- os.makedirs(inst_cache_dir, exist_ok=True)
-
- mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy")
- inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy")
-
- if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path):
- X_spec_m = np.load(mix_cache_path)
- y_spec_m = np.load(inst_cache_path)
- else:
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
-
- for d in range(len(mp.param["band"]), 0, -1):
- bp = mp.param["band"][d]
-
- if d == len(mp.param["band"]): # high-end band
- X_wave[d], _ = librosa.load(
- mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"]
- )
- y_wave[d], _ = librosa.load(
- inst_path,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- else: # lower bands
- X_wave[d] = librosa.resample(
- X_wave[d + 1],
- mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- y_wave[d] = librosa.resample(
- y_wave[d + 1],
- mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
-
- X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d])
-
- X_spec_s[d] = wave_to_spectrogram(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- )
- y_spec_s[d] = wave_to_spectrogram(
- y_wave[d],
- bp["hl"],
- bp["n_fft"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- )
-
- del X_wave, y_wave
-
- X_spec_m = combine_spectrograms(X_spec_s, mp)
- y_spec_m = combine_spectrograms(y_spec_s, mp)
-
- if X_spec_m.shape != y_spec_m.shape:
- raise ValueError("The combined spectrograms are different: " + mix_path)
-
- _, ext = os.path.splitext(mix_path)
-
- np.save(mix_cache_path, X_spec_m)
- np.save(inst_cache_path, y_spec_m)
-
- return X_spec_m, y_spec_m
-
-
-def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse):
- spec_left = np.asfortranarray(spec[0])
- spec_right = np.asfortranarray(spec[1])
-
- wave_left = librosa.istft(spec_left, hop_length=hop_length)
- wave_right = librosa.istft(spec_right, hop_length=hop_length)
-
- if reverse:
- return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
- elif mid_side:
- return np.asfortranarray(
- [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
- )
- elif mid_side_b2:
- return np.asfortranarray(
- [
- np.add(wave_right / 1.25, 0.4 * wave_left),
- np.subtract(wave_left / 1.25, 0.4 * wave_right),
- ]
- )
- else:
- return np.asfortranarray([wave_left, wave_right])
-
-
-def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2):
- import threading
-
- spec_left = np.asfortranarray(spec[0])
- spec_right = np.asfortranarray(spec[1])
-
- def run_thread(**kwargs):
- global wave_left
- wave_left = librosa.istft(**kwargs)
-
- thread = threading.Thread(
- target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length}
- )
- thread.start()
- wave_right = librosa.istft(spec_right, hop_length=hop_length)
- thread.join()
-
- if reverse:
- return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
- elif mid_side:
- return np.asfortranarray(
- [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
- )
- elif mid_side_b2:
- return np.asfortranarray(
- [
- np.add(wave_right / 1.25, 0.4 * wave_left),
- np.subtract(wave_left / 1.25, 0.4 * wave_right),
- ]
- )
- else:
- return np.asfortranarray([wave_left, wave_right])
-
-
-def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
- wave_band = {}
- bands_n = len(mp.param["band"])
- offset = 0
-
- for d in range(1, bands_n + 1):
- bp = mp.param["band"][d]
- spec_s = np.ndarray(
- shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex
- )
- h = bp["crop_stop"] - bp["crop_start"]
- spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[
- :, offset : offset + h, :
- ]
-
- offset += h
- if d == bands_n: # higher
- if extra_bins_h: # if --high_end_process bypass
- max_bin = bp["n_fft"] // 2
- spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[
- :, :extra_bins_h, :
- ]
- if bp["hpf_start"] > 0:
- spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
- if bands_n == 1:
- wave = spectrogram_to_wave(
- spec_s,
- bp["hl"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- )
- else:
- wave = np.add(
- wave,
- spectrogram_to_wave(
- spec_s,
- bp["hl"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- ),
- )
- else:
- sr = mp.param["band"][d + 1]["sr"]
- if d == 1: # lower
- spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
- wave = librosa.resample(
- spectrogram_to_wave(
- spec_s,
- bp["hl"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- ),
- bp["sr"],
- sr,
- res_type="sinc_fastest",
- )
- else: # mid
- spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
- spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
- wave2 = np.add(
- wave,
- spectrogram_to_wave(
- spec_s,
- bp["hl"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- ),
- )
- # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest")
- wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy")
-
- return wave.T
-
-
-def fft_lp_filter(spec, bin_start, bin_stop):
- g = 1.0
- for b in range(bin_start, bin_stop):
- g -= 1 / (bin_stop - bin_start)
- spec[:, b, :] = g * spec[:, b, :]
-
- spec[:, bin_stop:, :] *= 0
-
- return spec
-
-
-def fft_hp_filter(spec, bin_start, bin_stop):
- g = 1.0
- for b in range(bin_start, bin_stop, -1):
- g -= 1 / (bin_start - bin_stop)
- spec[:, b, :] = g * spec[:, b, :]
-
- spec[:, 0 : bin_stop + 1, :] *= 0
-
- return spec
-
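# Sketch of the low-pass ramp above with an 8-bin dummy spectrogram: the gain
# ramps down linearly across [bin_start, bin_stop) and everything from bin_stop
# upward is muted. Shapes and bin indices are illustrative only.
import numpy as np

spec = np.ones((2, 8, 4), dtype=np.complex64)        # (channels, bins, frames)
out = fft_lp_filter(spec.copy(), bin_start=2, bin_stop=6)
print(np.abs(out[0, :, 0]))                          # [1. 1. 0.75 0.5 0.25 0. 0. 0.]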
-
-def mirroring(a, spec_m, input_high_end, mp):
- if "mirroring" == a:
- mirror = np.flip(
- np.abs(
- spec_m[
- :,
- mp.param["pre_filter_start"]
- - 10
- - input_high_end.shape[1] : mp.param["pre_filter_start"]
- - 10,
- :,
- ]
- ),
- 1,
- )
- mirror = mirror * np.exp(1.0j * np.angle(input_high_end))
-
- return np.where(
- np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror
- )
-
- if "mirroring2" == a:
- mirror = np.flip(
- np.abs(
- spec_m[
- :,
- mp.param["pre_filter_start"]
- - 10
- - input_high_end.shape[1] : mp.param["pre_filter_start"]
- - 10,
- :,
- ]
- ),
- 1,
- )
- mi = np.multiply(mirror, input_high_end * 1.7)
-
- return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi)
-
-
-def ensembling(a, specs):
- for i in range(1, len(specs)):
- if i == 1:
- spec = specs[0]
-
- ln = min([spec.shape[2], specs[i].shape[2]])
- spec = spec[:, :, :ln]
- specs[i] = specs[i][:, :, :ln]
-
- if "min_mag" == a:
- spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec)
- if "max_mag" == a:
- spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec)
-
- return spec
-
-
-def stft(wave, nfft, hl):
- wave_left = np.asfortranarray(wave[0])
- wave_right = np.asfortranarray(wave[1])
- spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
- spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
- spec = np.asfortranarray([spec_left, spec_right])
-
- return spec
-
-
-def istft(spec, hl):
- spec_left = np.asfortranarray(spec[0])
- spec_right = np.asfortranarray(spec[1])
-
- wave_left = librosa.istft(spec_left, hop_length=hl)
- wave_right = librosa.istft(spec_right, hop_length=hl)
- wave = np.asfortranarray([wave_left, wave_right])
- return wave
-
-
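# Round-trip sketch for the stft/istft helpers above, with placeholder sizes.
# The positional n_fft call inside stft assumes the older librosa API; the
# reconstruction comes back slightly shorter than the input because of framing.
import numpy as np

wave = np.random.randn(2, 44100)
spec = stft(wave, nfft=2048, hl=512)
recon = istft(spec, hl=512)
print(wave.shape, spec.shape, recon.shape)           # roughly (2, 44100) (2, 1025, 87) (2, 44032)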
-if __name__ == "__main__":
- import argparse
- import sys
- import time
-
- import cv2
- from model_param_init import ModelParameters
-
- p = argparse.ArgumentParser()
- p.add_argument(
- "--algorithm",
- "-a",
- type=str,
- choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"],
- default="min_mag",
- )
- p.add_argument(
- "--model_params",
- "-m",
- type=str,
- default=os.path.join("modelparams", "1band_sr44100_hl512.json"),
- )
- p.add_argument("--output_name", "-o", type=str, default="output")
- p.add_argument("--vocals_only", "-v", action="store_true")
- p.add_argument("input", nargs="+")
- args = p.parse_args()
-
- start_time = time.time()
-
- if args.algorithm.startswith("invert") and len(args.input) != 2:
- raise ValueError("There should be two input files.")
-
- if not args.algorithm.startswith("invert") and len(args.input) < 2:
- raise ValueError("There must be at least two input files.")
-
- wave, specs = {}, {}
- mp = ModelParameters(args.model_params)
-
- for i in range(len(args.input)):
- spec = {}
-
- for d in range(len(mp.param["band"]), 0, -1):
- bp = mp.param["band"][d]
-
- if d == len(mp.param["band"]): # high-end band
- wave[d], _ = librosa.load(
- args.input[i],
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
-
- if len(wave[d].shape) == 1: # mono to stereo
- wave[d] = np.array([wave[d], wave[d]])
- else: # lower bands
- wave[d] = librosa.resample(
- wave[d + 1],
- mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
-
- spec[d] = wave_to_spectrogram(
- wave[d],
- bp["hl"],
- bp["n_fft"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- )
-
- specs[i] = combine_spectrograms(spec, mp)
-
- del wave
-
- if args.algorithm == "deep":
- d_spec = np.where(np.abs(specs[0]) <= np.abs(specs[1]), specs[0], specs[1])
- v_spec = d_spec - specs[1]
- sf.write(
- os.path.join("{}.wav".format(args.output_name)),
- cmb_spectrogram_to_wave(v_spec, mp),
- mp.param["sr"],
- )
-
- if args.algorithm.startswith("invert"):
- ln = min([specs[0].shape[2], specs[1].shape[2]])
- specs[0] = specs[0][:, :, :ln]
- specs[1] = specs[1][:, :, :ln]
-
- if "invert_p" == args.algorithm:
- X_mag = np.abs(specs[0])
- y_mag = np.abs(specs[1])
- max_mag = np.where(X_mag >= y_mag, X_mag, y_mag)
- v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0]))
- else:
- specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2)
- v_spec = specs[0] - specs[1]
-
- if not args.vocals_only:
- X_mag = np.abs(specs[0])
- y_mag = np.abs(specs[1])
- v_mag = np.abs(v_spec)
-
- X_image = spectrogram_to_image(X_mag)
- y_image = spectrogram_to_image(y_mag)
- v_image = spectrogram_to_image(v_mag)
-
- cv2.imwrite("{}_X.png".format(args.output_name), X_image)
- cv2.imwrite("{}_y.png".format(args.output_name), y_image)
- cv2.imwrite("{}_v.png".format(args.output_name), v_image)
-
- sf.write(
- "{}_X.wav".format(args.output_name),
- cmb_spectrogram_to_wave(specs[0], mp),
- mp.param["sr"],
- )
- sf.write(
- "{}_y.wav".format(args.output_name),
- cmb_spectrogram_to_wave(specs[1], mp),
- mp.param["sr"],
- )
-
- sf.write(
- "{}_v.wav".format(args.output_name),
- cmb_spectrogram_to_wave(v_spec, mp),
- mp.param["sr"],
- )
- else:
- if not args.algorithm == "deep":
- sf.write(
- os.path.join("ensembled", "{}.wav".format(args.output_name)),
- cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp),
- mp.param["sr"],
- )
-
- if args.algorithm == "align":
- trackalignment = [
- {
- "file1": '"{}"'.format(args.input[0]),
- "file2": '"{}"'.format(args.input[1]),
- }
- ]
-
- for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."):
- os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}")
-
- # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1))
diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textrecog/nrtr/nrtr_r31_1by16_1by8_academic.py b/spaces/EuroPython2022/mmocr-demo/configs/textrecog/nrtr/nrtr_r31_1by16_1by8_academic.py
deleted file mode 100644
index b7adc0d30cda5e5556821ff941d6e00dcd3b4ba7..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/mmocr-demo/configs/textrecog/nrtr/nrtr_r31_1by16_1by8_academic.py
+++ /dev/null
@@ -1,48 +0,0 @@
-_base_ = [
- '../../_base_/default_runtime.py',
- '../../_base_/schedules/schedule_adam_step_6e.py',
- '../../_base_/recog_pipelines/nrtr_pipeline.py',
- '../../_base_/recog_datasets/ST_MJ_train.py',
- '../../_base_/recog_datasets/academic_test.py'
-]
-
-train_list = {{_base_.train_list}}
-test_list = {{_base_.test_list}}
-
-train_pipeline = {{_base_.train_pipeline}}
-test_pipeline = {{_base_.test_pipeline}}
-
-label_convertor = dict(
- type='AttnConvertor', dict_type='DICT90', with_unknown=True)
-
-model = dict(
- type='NRTR',
- backbone=dict(
- type='ResNet31OCR',
- layers=[1, 2, 5, 3],
- channels=[32, 64, 128, 256, 512, 512],
- stage4_pool_cfg=dict(kernel_size=(2, 1), stride=(2, 1)),
- last_stage_pool=True),
- encoder=dict(type='NRTREncoder'),
- decoder=dict(type='NRTRDecoder'),
- loss=dict(type='TFLoss'),
- label_convertor=label_convertor,
- max_seq_len=40)
-
-data = dict(
- samples_per_gpu=128,
- workers_per_gpu=4,
- train=dict(
- type='UniformConcatDataset',
- datasets=train_list,
- pipeline=train_pipeline),
- val=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline),
- test=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline))
-
-evaluation = dict(interval=1, metric='acc')
diff --git a/spaces/EyanAn/vits-uma-genshin-honkai/models.py b/spaces/EyanAn/vits-uma-genshin-honkai/models.py
deleted file mode 100644
index 52e15d1b9775038fd6e82b2efe6f95f51c66802d..0000000000000000000000000000000000000000
--- a/spaces/EyanAn/vits-uma-genshin-honkai/models.py
+++ /dev/null
@@ -1,534 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
- super().__init__()
- filter_channels = in_channels # this needs to be removed in a future version.
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
- logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
- return nll + logq # [b]
- else:
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class DurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.proj = nn.Conv1d(filter_channels, 1, 1)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- def forward(self, x, x_mask, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
- x = self.proj(x * x_mask)
- return x * x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
-
- self.emb = nn.Embedding(n_vocab, hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths):
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
- k, u, padding=(k-u)//2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel//(2**(i+1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i*self.num_kernels+j](x)
- else:
- xs += self.resblocks[i*self.num_kernels+j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=0,
- gin_channels=0,
- use_sdp=True,
- **kwargs):
-
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
-
- self.use_sdp = use_sdp
-
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- if use_sdp:
- self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
- else:
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
- if n_speakers > 1:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- def forward(self, x, x_lengths, y, y_lengths, sid=None):
-
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- with torch.no_grad():
- # negative cross-entropy
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
-
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
- w = attn.sum(2)
- if self.use_sdp:
- l_length = self.dp(x, x_mask, w, g=g)
- l_length = l_length / torch.sum(x_mask)
- else:
- logw_ = torch.log(w + 1e-6) * x_mask
- logw = self.dp(x, x_mask, g=g)
- l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
-
- # expand prior
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
- o = self.dec(z_slice, g=g)
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
- device = next(self.parameters()).device # get the device the model parameters live on
- x, m_p, logs_p, x_mask = self.enc_p(x.to(device), x_lengths.to(device))
- if self.n_speakers > 0:
- g = self.emb_g(sid.to(device)).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- if self.use_sdp:
- logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
- else:
- logw = self.dp(x, x_mask, g=g)
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:,:,:max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
- def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
- assert self.n_speakers > 0, "n_speakers has to be larger than 0."
- g_src = self.emb_g(sid_src).unsqueeze(-1)
- g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
- z_p = self.flow(z, y_mask, g=g_src)
- z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
- o_hat = self.dec(z_hat * y_mask, g=g_tgt)
- return o_hat, y_mask, (z, z_p, z_hat)
-
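# A hedged usage sketch for SynthesizerTrn.infer. Every hyperparameter, the
# vocabulary size, and the token ids below are placeholders rather than the
# values of any particular checkpoint, and the usual VITS support modules
# (commons, modules, attentions, monotonic_align) are assumed to be importable.
import torch

net_g = SynthesizerTrn(
    n_vocab=100, spec_channels=513, segment_size=32,
    inter_channels=192, hidden_channels=192, filter_channels=768,
    n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
    resblock='1', resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[8, 8, 2, 2], upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4],
    n_speakers=4, gin_channels=256).eval()

x = torch.randint(0, 100, (1, 50))       # dummy phoneme ids
x_lengths = torch.LongTensor([50])
with torch.no_grad():
    audio, *_ = net_g.infer(x, x_lengths, sid=torch.LongTensor([0]),
                            noise_scale=0.6, noise_scale_w=0.668, length_scale=1.0)
print(audio.shape)                        # (1, 1, n_samples)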
diff --git "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" "b/spaces/Fengbinbin/gpt-academic/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py"
deleted file mode 100644
index 72ffe6b1a8f2a59a3c5c364e30dfb4949bd6a929..0000000000000000000000000000000000000000
--- "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py"
+++ /dev/null
@@ -1,67 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-fast_debug = False
-
-
-def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
- import time, glob, os
- print('begin analysis on:', file_manifest)
- for index, fp in enumerate(file_manifest):
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
- file_content = f.read()
-
- prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
- i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
- i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
- chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
- if not fast_debug:
- msg = '正常'
- # ** gpt request **
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # with timeout countdown
-
- chatbot[-1] = (i_say_show_user, gpt_say)
- history.append(i_say_show_user); history.append(gpt_say)
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
- if not fast_debug: time.sleep(2)
-
- all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
- i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
- chatbot.append((i_say, "[Local Message] waiting gpt response."))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
- if not fast_debug:
- msg = '正常'
- # ** gpt request **
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt) # with timeout countdown
-
- chatbot[-1] = (i_say, gpt_say)
- history.append(i_say); history.append(gpt_say)
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
- res = write_results_to_file(history)
- chatbot.append(("完成了吗?", res))
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
-
-
-
-@CatchException
-def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- history = [] # clear history to avoid input overflow
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
- # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
- # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
diff --git a/spaces/GadaiEngin-GBOX/GadaiEngineNeo-A/README.md b/spaces/GadaiEngin-GBOX/GadaiEngineNeo-A/README.md
deleted file mode 100644
index dbe9065dc5eeb35419c6051c0115c73c3951fc5d..0000000000000000000000000000000000000000
--- a/spaces/GadaiEngin-GBOX/GadaiEngineNeo-A/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: GadaiEngine 1
-emoji: 📈
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.13.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/sequential_insertion_and_stacking.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/sequential_insertion_and_stacking.py
deleted file mode 100644
index 665c67fa6c6d101577095b12a4d533d7a3ff8d8f..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/sequential_insertion_and_stacking.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import numpy as np
-import os
-import pybullet as p
-import random
-from cliport.tasks import primitives
-from cliport.tasks.grippers import Spatula
-from cliport.tasks.task import Task
-from cliport.utils import utils
-
-class SequentialInsertionAndStacking(Task):
- """Pick up and insert each ell block into the corresponding colored fixture in the sequence of red, blue, and green. After successful insertion, pick up the three blocks again from the fixtures and stack them in a corner of the tabletop in the same color sequence - red at the bottom, blue in the middle, and green on top."""
-
- def __init__(self):
- super().__init__()
- self.max_steps = 15
- self.lang_template = "insert the {color} ell block into the {color} fixture and then stack them in the corner"
- self.task_completed_desc = "done inserting and stacking."
- self.additional_reset()
-
- def reset(self, env):
- super().reset(env)
-
- # Add fixtures.
- fixture_size = (0.12, 0.12, 0)
- fixture_urdf = 'insertion/fixture.urdf'
- fixture_poses = []
- colors = ['red', 'blue', 'green']
- for color in colors:
- fixture_pose = self.get_random_pose(env, fixture_size)
- env.add_object(fixture_urdf, fixture_pose, category='fixed', color=utils.COLORS[color])
- fixture_poses.append(fixture_pose)
-
- # Add ell blocks.
- ell_size = (0.04, 0.04, 0.04)
- ell_urdf = 'insertion/ell.urdf'
- ells = []
- for color in colors:
- ell_pose = self.get_random_pose(env, ell_size)
- ell_id = env.add_object(ell_urdf, ell_pose, color=utils.COLORS[color])
- ells.append(ell_id)
-
- # Goal: each ell block is in the corresponding colored fixture.
- for i in range(3):
- self.add_goal(objs=[ells[i]], matches=np.ones((1, 1)), targ_poses=[fixture_poses[i]], replace=False,
- rotations=True, metric='pose', params=None, step_max_reward=1/3)
- self.lang_goals.append(self.lang_template.format(color=colors[i]))
-
- # Add corner.
- corner_size = (0.12, 0.12, 0)
- corner_pose = self.get_random_pose(env, corner_size)
- corner_urdf = 'corner/corner-template.urdf'
- env.add_object(corner_urdf, corner_pose, category='fixed')
-
- # Goal: ell blocks are stacked in the corner in the color sequence - red at the bottom, blue in the middle, and green on top.
- stack_poses = [(0, 0, 0.04), (0, 0, 0.08), (0, 0, 0.12)]
- targs = [(utils.apply(corner_pose, i), corner_pose[1]) for i in stack_poses]
- self.add_goal(objs=ells, matches=np.ones((3, 3)), targ_poses=targs, replace=False,
- rotations=True, metric='pose', params=None, step_max_reward=1/3,
- language_goal="stack the ell blocks in the corner in the color sequence - red at the bottom, blue in the middle, and green on top")
\ No newline at end of file
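# A quick check of the stacking offsets used above, against an illustrative
# corner pose (position plus pybullet-style xyzw quaternion). The pose values
# are made up, and this assumes utils.apply returns an (x, y, z) tuple, as its
# use in this task suggests.
from cliport.utils import utils

corner_pose = ((0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))    # identity rotation
stack_offsets = [(0, 0, 0.04), (0, 0, 0.08), (0, 0, 0.12)]
targets = [utils.apply(corner_pose, off) for off in stack_offsets]
print([t[2] for t in targets])                           # heights 0.04, 0.08, 0.12 above the corner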
diff --git a/spaces/Gradio-Blocks/DualStyleGAN/style.css b/spaces/Gradio-Blocks/DualStyleGAN/style.css
deleted file mode 100644
index c1ebed2ffffa4abc9a268ca635447ba3dbd78fa5..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/DualStyleGAN/style.css
+++ /dev/null
@@ -1,17 +0,0 @@
-h1 {
- text-align: center;
-}
-img#overview {
- max-width: 800px;
- max-height: 600px;
- display: block;
- margin: auto;
-}
-img#style-image {
- max-width: 1000px;
- max-height: 600px;
-}
-img#visitor-badge {
- display: block;
- margin: auto;
-}
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py
deleted file mode 100644
index eaae1342f4aaa7015510d51bb4f12500a8a6b81d..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# model settings
-norm_cfg = dict(type='BN', requires_grad=False)
-model = dict(
- type='MaskRCNN',
- pretrained='open-mmlab://detectron2/resnet50_caffe',
- backbone=dict(
- type='ResNet',
- depth=50,
- num_stages=3,
- strides=(1, 2, 2),
- dilations=(1, 1, 1),
- out_indices=(2, ),
- frozen_stages=1,
- norm_cfg=norm_cfg,
- norm_eval=True,
- style='caffe'),
- rpn_head=dict(
- type='RPNHead',
- in_channels=1024,
- feat_channels=1024,
- anchor_generator=dict(
- type='AnchorGenerator',
- scales=[2, 4, 8, 16, 32],
- ratios=[0.5, 1.0, 2.0],
- strides=[16]),
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[.0, .0, .0, .0],
- target_stds=[1.0, 1.0, 1.0, 1.0]),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
- loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
- roi_head=dict(
- type='StandardRoIHead',
- shared_head=dict(
- type='ResLayer',
- depth=50,
- stage=3,
- stride=2,
- dilation=1,
- style='caffe',
- norm_cfg=norm_cfg,
- norm_eval=True),
- bbox_roi_extractor=dict(
- type='SingleRoIExtractor',
- roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
- out_channels=1024,
- featmap_strides=[16]),
- bbox_head=dict(
- type='BBoxHead',
- with_avg_pool=True,
- roi_feat_size=7,
- in_channels=2048,
- num_classes=80,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.1, 0.1, 0.2, 0.2]),
- reg_class_agnostic=False,
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
- mask_roi_extractor=None,
- mask_head=dict(
- type='FCNMaskHead',
- num_convs=0,
- in_channels=2048,
- conv_out_channels=256,
- num_classes=80,
- loss_mask=dict(
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
- # model training and testing settings
- train_cfg=dict(
- rpn=dict(
- assigner=dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.7,
- neg_iou_thr=0.3,
- min_pos_iou=0.3,
- match_low_quality=True,
- ignore_iof_thr=-1),
- sampler=dict(
- type='RandomSampler',
- num=256,
- pos_fraction=0.5,
- neg_pos_ub=-1,
- add_gt_as_proposals=False),
- allowed_border=0,
- pos_weight=-1,
- debug=False),
- rpn_proposal=dict(
- nms_pre=12000,
- max_per_img=2000,
- nms=dict(type='nms', iou_threshold=0.7),
- min_bbox_size=0),
- rcnn=dict(
- assigner=dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.5,
- neg_iou_thr=0.5,
- min_pos_iou=0.5,
- match_low_quality=False,
- ignore_iof_thr=-1),
- sampler=dict(
- type='RandomSampler',
- num=512,
- pos_fraction=0.25,
- neg_pos_ub=-1,
- add_gt_as_proposals=True),
- mask_size=14,
- pos_weight=-1,
- debug=False)),
- test_cfg=dict(
- rpn=dict(
- nms_pre=6000,
- nms=dict(type='nms', iou_threshold=0.7),
- max_per_img=1000,
- min_bbox_size=0),
- rcnn=dict(
- score_thr=0.05,
- nms=dict(type='nms', iou_threshold=0.5),
- max_per_img=100,
- mask_thr_binary=0.5)))
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py
deleted file mode 100644
index 8c766f05f4ee61273670ce74ed60c91c89beb50e..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py
+++ /dev/null
@@ -1,13 +0,0 @@
-_base_ = './rpn_r50_fpn_2x_coco.py'
-model = dict(
- pretrained='open-mmlab://resnext101_64x4d',
- backbone=dict(
- type='ResNeXt',
- depth=101,
- groups=64,
- base_width=4,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- style='pytorch'))
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/vfnet/vfnet_r50_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/vfnet/vfnet_r50_fpn_1x_coco.py
deleted file mode 100644
index 76566bdb0fe827f222924142c22c846a86fd1d32..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/vfnet/vfnet_r50_fpn_1x_coco.py
+++ /dev/null
@@ -1,108 +0,0 @@
-_base_ = [
- '../_base_/datasets/coco_detection.py',
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-# model settings
-model = dict(
- type='VFNet',
- pretrained='torchvision://resnet50',
- backbone=dict(
- type='ResNet',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- norm_eval=True,
- style='pytorch'),
- neck=dict(
- type='FPN',
- in_channels=[256, 512, 1024, 2048],
- out_channels=256,
- start_level=1,
- add_extra_convs=True,
- extra_convs_on_inputs=False, # use P5
- num_outs=5,
- relu_before_extra_convs=True),
- bbox_head=dict(
- type='VFNetHead',
- num_classes=80,
- in_channels=256,
- stacked_convs=3,
- feat_channels=256,
- strides=[8, 16, 32, 64, 128],
- center_sampling=False,
- dcn_on_last_conv=False,
- use_atss=True,
- use_vfl=True,
- loss_cls=dict(
- type='VarifocalLoss',
- use_sigmoid=True,
- alpha=0.75,
- gamma=2.0,
- iou_weighted=True,
- loss_weight=1.0),
- loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
- loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)),
- # training and testing settings
- train_cfg=dict(
- assigner=dict(type='ATSSAssigner', topk=9),
- allowed_border=-1,
- pos_weight=-1,
- debug=False),
- test_cfg=dict(
- nms_pre=1000,
- min_bbox_size=0,
- score_thr=0.05,
- nms=dict(type='nms', iou_threshold=0.6),
- max_per_img=100))
-
-# data setting
-dataset_type = 'CocoDataset'
-data_root = 'data/coco/'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
-
-# optimizer
-optimizer = dict(
- lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=500,
- warmup_ratio=0.1,
- step=[8, 11])
-runner = dict(type='EpochBasedRunner', max_epochs=12)
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/upernet_r50.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/upernet_r50.py
deleted file mode 100644
index 10974962fdd7136031fd06de1700f497d355ceaa..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/upernet_r50.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 1, 1),
- strides=(1, 2, 2, 2),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='UPerHead',
- in_channels=[256, 512, 1024, 2048],
- in_index=[0, 1, 2, 3],
- pool_scales=(1, 2, 3, 6),
- channels=512,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py
deleted file mode 100644
index 398d9759cafc1d01e78c138abd249808531a97b9..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(
- pretrained='torchvision://resnet101',
- backbone=dict(type='ResNet', depth=101))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/stare.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/stare.py
deleted file mode 100644
index cbd14e0920e7f6a73baff1432e5a32ccfdb0dfae..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/stare.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os.path as osp
-
-from .builder import DATASETS
-from .custom import CustomDataset
-
-
-@DATASETS.register_module()
-class STAREDataset(CustomDataset):
- """STARE dataset.
-
- In segmentation map annotation for STARE, 0 stands for background, which is
- included in 2 categories. ``reduce_zero_label`` is fixed to False. The
- ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
- '.ah.png'.
- """
-
- CLASSES = ('background', 'vessel')
-
- PALETTE = [[120, 120, 120], [6, 230, 230]]
-
- def __init__(self, **kwargs):
- super(STAREDataset, self).__init__(
- img_suffix='.png',
- seg_map_suffix='.ah.png',
- reduce_zero_label=False,
- **kwargs)
- assert osp.exists(self.img_dir)
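# A hedged sketch of how a dataset like this is typically wired into an
# MMSegmentation config; the directory names are placeholders and
# train_pipeline is assumed to be defined earlier in the same config file.
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='STAREDataset',
        data_root='data/STARE',
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=train_pipeline))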
diff --git a/spaces/GreenRaptor/MMS/app.py b/spaces/GreenRaptor/MMS/app.py
deleted file mode 100644
index f2762533939d40eead602a662d0c4189839a17cf..0000000000000000000000000000000000000000
--- a/spaces/GreenRaptor/MMS/app.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import gradio as gr
-
-import argparse
-import soundfile as sf
-import numpy as np
-import tempfile
-from pathlib import Path
-import os
-import subprocess
-import sys
-import re
-
-# from transformers import AutoProcessor, AutoModelForPreTraining
-
-# processor = AutoProcessor.from_pretrained("patrickvonplaten/mms-1b")
-
-# model = AutoModelForPreTraining.from_pretrained("patrickvonplaten/mms-1b")
-
-def process(audio, model, lang, format):
- with tempfile.TemporaryDirectory() as tmpdir:
- print(">>> preparing tmp manifest dir ...", file=sys.stderr)
- tmpdir = Path(tmpdir)
- with open(tmpdir / "dev.tsv", "w") as fw:
- fw.write("/\n")
- for wav in audio:
- nsample = sf.SoundFile(wav).frames
- fw.write(f"{wav}\t{nsample}\n")
- with open(tmpdir / "dev.uid", "w") as fw:
- fw.write("".join(f"{wav}\n" for wav in audio))
- with open(tmpdir / "dev.ltr", "w") as fw:
- fw.write("d u m m y | d u m m y\n"*len(audio))
- with open(tmpdir / "dev.wrd", "w") as fw:
- fw.write("dummy dummy\n"*len(audio))
- cmd = f"""
- PYTHONPATH=. PREFIX=INFER HYDRA_FULL_ERROR=1 python infer.py -m decoding.type=viterbi dataset.max_tokens=4000000 distributed_training.distributed_world_size=1 "common_eval.path='{model}'" task.data={tmpdir} dataset.gen_subset="{lang}:dev" common_eval.post_process={format} decoding.results_path={tmpdir}
- """
- print(">>> loading model & running inference ...", file=sys.stderr)
- subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL,)
-        hypos = []
-        with open(tmpdir/"hypo.word") as fr:
-            for ii, hypo in enumerate(fr):
-                hypo = re.sub(r"\(\S+\)$", "", hypo).strip()
-                print(f'===============\nInput: {audio_paths[ii]}\nOutput: {hypo}')
-                hypos.append(hypo)
-        return "\n".join(hypos)
-
-def transcribe(audio):
-    model = "base_300m.pt"
-    lang = "eng"
-    format = "letter"
-    # Gradio's Audio(type="filepath") passes a single file path
-    return process([audio], model, lang, format)
-
-gr.Interface(
- title = 'MetaAI (Facebook Research) MMS (Massively Multilingual Speech) ASR',
- fn=transcribe,
- inputs=[
- gr.inputs.Audio(source="microphone", type="filepath")
- ],
- outputs=[
- "textbox"
- ],
- live=True).launch()
\ No newline at end of file
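The app above wraps fairseq's infer.py behind a Gradio microphone interface. A hypothetical local smoke test of the same wrapper, assuming base_300m.pt, infer.py, and a 16 kHz sample.wav are present in the working directory, would be:

# Hypothetical check of transcribe() without the Gradio UI; file names are placeholders.
if __name__ == "__main__":
    text = transcribe("sample.wav")   # gr.inputs.Audio(type="filepath") passes a path like this
    print(text)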
diff --git a/spaces/Grezz/generate_human_motion/VQ-Trans/visualize/simplify_loc2rot.py b/spaces/Grezz/generate_human_motion/VQ-Trans/visualize/simplify_loc2rot.py
deleted file mode 100644
index 5d3d4411310876033cb50d998ad64557a9c4b0c1..0000000000000000000000000000000000000000
--- a/spaces/Grezz/generate_human_motion/VQ-Trans/visualize/simplify_loc2rot.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import numpy as np
-import os
-import torch
-from visualize.joints2smpl.src import config
-import smplx
-import h5py
-from visualize.joints2smpl.src.smplify import SMPLify3D
-from tqdm import tqdm
-import utils.rotation_conversions as geometry
-import argparse
-
-
-class joints2smpl:
-
- def __init__(self, num_frames, device_id, cuda=True):
- self.device = torch.device("cuda:" + str(device_id) if cuda else "cpu")
- # self.device = torch.device("cpu")
- self.batch_size = num_frames
- self.num_joints = 22 # for HumanML3D
- self.joint_category = "AMASS"
- self.num_smplify_iters = 150
- self.fix_foot = False
- print(config.SMPL_MODEL_DIR)
- smplmodel = smplx.create(config.SMPL_MODEL_DIR,
- model_type="smpl", gender="neutral", ext="pkl",
- batch_size=self.batch_size).to(self.device)
-
- # ## --- load the mean pose as original ----
- smpl_mean_file = config.SMPL_MEAN_FILE
-
- file = h5py.File(smpl_mean_file, 'r')
- self.init_mean_pose = torch.from_numpy(file['pose'][:]).unsqueeze(0).repeat(self.batch_size, 1).float().to(self.device)
- self.init_mean_shape = torch.from_numpy(file['shape'][:]).unsqueeze(0).repeat(self.batch_size, 1).float().to(self.device)
- self.cam_trans_zero = torch.Tensor([0.0, 0.0, 0.0]).unsqueeze(0).to(self.device)
- #
-
- # # #-------------initialize SMPLify
- self.smplify = SMPLify3D(smplxmodel=smplmodel,
- batch_size=self.batch_size,
- joints_category=self.joint_category,
- num_iters=self.num_smplify_iters,
- device=self.device)
-
-
- def npy2smpl(self, npy_path):
- out_path = npy_path.replace('.npy', '_rot.npy')
- motions = np.load(npy_path, allow_pickle=True)[None][0]
- # print_batch('', motions)
- n_samples = motions['motion'].shape[0]
- all_thetas = []
- for sample_i in tqdm(range(n_samples)):
- thetas, _ = self.joint2smpl(motions['motion'][sample_i].transpose(2, 0, 1)) # [nframes, njoints, 3]
- all_thetas.append(thetas.cpu().numpy())
- motions['motion'] = np.concatenate(all_thetas, axis=0)
- print('motions', motions['motion'].shape)
-
- print(f'Saving [{out_path}]')
- np.save(out_path, motions)
- exit()
-
-
-
- def joint2smpl(self, input_joints, init_params=None):
- _smplify = self.smplify # if init_params is None else self.smplify_fast
- pred_pose = torch.zeros(self.batch_size, 72).to(self.device)
- pred_betas = torch.zeros(self.batch_size, 10).to(self.device)
- pred_cam_t = torch.zeros(self.batch_size, 3).to(self.device)
- keypoints_3d = torch.zeros(self.batch_size, self.num_joints, 3).to(self.device)
-
- # run the whole seqs
- num_seqs = input_joints.shape[0]
-
-
- # joints3d = input_joints[idx] # *1.2 #scale problem [check first]
- keypoints_3d = torch.Tensor(input_joints).to(self.device).float()
-
- # if idx == 0:
- if init_params is None:
- pred_betas = self.init_mean_shape
- pred_pose = self.init_mean_pose
- pred_cam_t = self.cam_trans_zero
- else:
- pred_betas = init_params['betas']
- pred_pose = init_params['pose']
- pred_cam_t = init_params['cam']
-
- if self.joint_category == "AMASS":
- confidence_input = torch.ones(self.num_joints)
- # make sure the foot and ankle
- if self.fix_foot == True:
- confidence_input[7] = 1.5
- confidence_input[8] = 1.5
- confidence_input[10] = 1.5
- confidence_input[11] = 1.5
- else:
-                print("Joint category not supported!")
-
- new_opt_vertices, new_opt_joints, new_opt_pose, new_opt_betas, \
- new_opt_cam_t, new_opt_joint_loss = _smplify(
- pred_pose.detach(),
- pred_betas.detach(),
- pred_cam_t.detach(),
- keypoints_3d,
- conf_3d=confidence_input.to(self.device),
- # seq_ind=idx
- )
-
- thetas = new_opt_pose.reshape(self.batch_size, 24, 3)
- thetas = geometry.matrix_to_rotation_6d(geometry.axis_angle_to_matrix(thetas)) # [bs, 24, 6]
- root_loc = torch.tensor(keypoints_3d[:, 0]) # [bs, 3]
- root_loc = torch.cat([root_loc, torch.zeros_like(root_loc)], dim=-1).unsqueeze(1) # [bs, 1, 6]
- thetas = torch.cat([thetas, root_loc], dim=1).unsqueeze(0).permute(0, 2, 3, 1) # [1, 25, 6, 196]
-
- return thetas.clone().detach(), {'pose': new_opt_joints[0, :24].flatten().clone().detach(), 'betas': new_opt_betas.clone().detach(), 'cam': new_opt_cam_t.clone().detach()}
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument("--input_path", type=str, required=True, help='Blender file or dir with blender files')
- parser.add_argument("--cuda", type=bool, default=True, help='')
- parser.add_argument("--device", type=int, default=0, help='')
- params = parser.parse_args()
-
- simplify = joints2smpl(device_id=params.device, cuda=params.cuda)
-
- if os.path.isfile(params.input_path) and params.input_path.endswith('.npy'):
- simplify.npy2smpl(params.input_path)
- elif os.path.isdir(params.input_path):
- files = [os.path.join(params.input_path, f) for f in os.listdir(params.input_path) if f.endswith('.npy')]
- for f in files:
- simplify.npy2smpl(f)
\ No newline at end of file
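As a quick shape check for the class above, a hypothetical call with zero-filled joints standing in for real HumanML3D output is sketched below; it assumes the SMPL assets referenced by config.SMPL_MODEL_DIR and config.SMPL_MEAN_FILE are available, and the frame count is illustrative only.

# Hypothetical sketch: fit SMPL poses to a 60-frame, 22-joint clip on CPU.
import numpy as np

j2s = joints2smpl(num_frames=60, device_id=0, cuda=False)
motion = np.zeros((60, 22, 3), dtype=np.float32)   # [nframes, njoints, 3]
thetas, init_params = j2s.joint2smpl(motion)
print(thetas.shape)                                # expected: [1, 25, 6, 60]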
diff --git a/spaces/GroveStreet/GTA_SOVITS/diffusion/unit2mel.py b/spaces/GroveStreet/GTA_SOVITS/diffusion/unit2mel.py
deleted file mode 100644
index 52293b13da8e1afeef6fa5586aeaf01cbcc27fb7..0000000000000000000000000000000000000000
--- a/spaces/GroveStreet/GTA_SOVITS/diffusion/unit2mel.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import os
-import yaml
-import torch
-import torch.nn as nn
-import numpy as np
-from .diffusion import GaussianDiffusion
-from .wavenet import WaveNet
-from .vocoder import Vocoder
-
-class DotDict(dict):
- def __getattr__(*args):
- val = dict.get(*args)
- return DotDict(val) if type(val) is dict else val
-
- __setattr__ = dict.__setitem__
- __delattr__ = dict.__delitem__
-
-
-def load_model_vocoder(
- model_path,
- device='cpu',
- config_path = None
- ):
- if config_path is None: config_file = os.path.join(os.path.split(model_path)[0], 'config.yaml')
- else: config_file = config_path
-
- with open(config_file, "r") as config:
- args = yaml.safe_load(config)
- args = DotDict(args)
-
- # load vocoder
- vocoder = Vocoder(args.vocoder.type, args.vocoder.ckpt, device=device)
-
- # load model
- model = Unit2Mel(
- args.data.encoder_out_channels,
- args.model.n_spk,
- args.model.use_pitch_aug,
- vocoder.dimension,
- args.model.n_layers,
- args.model.n_chans,
- args.model.n_hidden)
-
- print(' [Loading] ' + model_path)
- ckpt = torch.load(model_path, map_location=torch.device(device))
- model.to(device)
- model.load_state_dict(ckpt['model'])
- model.eval()
- return model, vocoder, args
-
-
-class Unit2Mel(nn.Module):
- def __init__(
- self,
- input_channel,
- n_spk,
- use_pitch_aug=False,
- out_dims=128,
- n_layers=20,
- n_chans=384,
- n_hidden=256):
- super().__init__()
- self.unit_embed = nn.Linear(input_channel, n_hidden)
- self.f0_embed = nn.Linear(1, n_hidden)
- self.volume_embed = nn.Linear(1, n_hidden)
- if use_pitch_aug:
- self.aug_shift_embed = nn.Linear(1, n_hidden, bias=False)
- else:
- self.aug_shift_embed = None
- self.n_spk = n_spk
- if n_spk is not None and n_spk > 1:
- self.spk_embed = nn.Embedding(n_spk, n_hidden)
-
- self.n_hidden = n_hidden
- # diffusion
- self.decoder = GaussianDiffusion(WaveNet(out_dims, n_layers, n_chans, n_hidden), out_dims=out_dims)
- self.input_channel = input_channel
-
- def init_spkembed(self, units, f0, volume, spk_id = None, spk_mix_dict = None, aug_shift = None,
- gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
-
- '''
- input:
- B x n_frames x n_unit
- return:
- dict of B x n_frames x feat
- '''
- x = self.unit_embed(units) + self.f0_embed((1+ f0 / 700).log()) + self.volume_embed(volume)
- if self.n_spk is not None and self.n_spk > 1:
- if spk_mix_dict is not None:
-                spk_embed_mix = torch.zeros((1, 1, self.n_hidden))
- for k, v in spk_mix_dict.items():
- spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
- spk_embeddd = self.spk_embed(spk_id_torch)
- self.speaker_map[k] = spk_embeddd
- spk_embed_mix = spk_embed_mix + v * spk_embeddd
- x = x + spk_embed_mix
- else:
- x = x + self.spk_embed(spk_id - 1)
- self.speaker_map = self.speaker_map.unsqueeze(0)
- self.speaker_map = self.speaker_map.detach()
- return x.transpose(1, 2)
-
- def init_spkmix(self, n_spk):
- self.speaker_map = torch.zeros((n_spk,1,1,self.n_hidden))
- hubert_hidden_size = self.input_channel
- n_frames = 10
- hubert = torch.randn((1, n_frames, hubert_hidden_size))
- mel2ph = torch.arange(end=n_frames).unsqueeze(0).long()
- f0 = torch.randn((1, n_frames))
- volume = torch.randn((1, n_frames))
- spks = {}
- for i in range(n_spk):
- spks.update({i:1.0/float(self.n_spk)})
- orgouttt = self.init_spkembed(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)
-
- def forward(self, units, f0, volume, spk_id = None, spk_mix_dict = None, aug_shift = None,
- gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
-
- '''
- input:
- B x n_frames x n_unit
- return:
- dict of B x n_frames x feat
- '''
-
- x = self.unit_embed(units) + self.f0_embed((1+ f0 / 700).log()) + self.volume_embed(volume)
- if self.n_spk is not None and self.n_spk > 1:
- if spk_mix_dict is not None:
- for k, v in spk_mix_dict.items():
- spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
- x = x + v * self.spk_embed(spk_id_torch)
- else:
- if spk_id.shape[1] > 1:
- g = spk_id.reshape((spk_id.shape[0], spk_id.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
- g = g * self.speaker_map # [N, S, B, 1, H]
- g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
- g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
- x = x + g
- else:
- x = x + self.spk_embed(spk_id)
- if self.aug_shift_embed is not None and aug_shift is not None:
- x = x + self.aug_shift_embed(aug_shift / 5)
- x = self.decoder(x, gt_spec=gt_spec, infer=infer, infer_speedup=infer_speedup, method=method, k_step=k_step, use_tqdm=use_tqdm)
-
- return x
-
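A minimal, hypothetical inference sketch for the module above; the checkpoint path is a placeholder, the frame count is arbitrary, and a single-speaker checkpoint (n_spk == 1) is assumed so that no spk_id needs to be passed.

# Hypothetical sketch: load the diffusion decoder and run it on random features.
import torch

model, vocoder, args = load_model_vocoder('exp/diffusion/model.pt', device='cpu')
n_frames = 100
units = torch.randn(1, n_frames, args.data.encoder_out_channels)  # B x n_frames x n_unit
f0 = torch.full((1, n_frames, 1), 220.0)                          # Hz; embedded as log(1 + f0/700)
volume = torch.rand(1, n_frames, 1)
mel = model(units, f0, volume, infer=True, infer_speedup=10, method='dpm-solver')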
diff --git a/spaces/GroveStreet/GTA_SOVITS/vencoder/ContentVec256L12_Onnx.py b/spaces/GroveStreet/GTA_SOVITS/vencoder/ContentVec256L12_Onnx.py
deleted file mode 100644
index 9ad5085e02654fd1fcfbdad7d476bfa9b763d2c6..0000000000000000000000000000000000000000
--- a/spaces/GroveStreet/GTA_SOVITS/vencoder/ContentVec256L12_Onnx.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from vencoder.encoder import SpeechEncoder
-import onnxruntime
-import torch
-
-class ContentVec256L12_Onnx(SpeechEncoder):
- def __init__(self,vec_path = "pretrain/vec-256-layer-12.onnx",device=None):
- print("load model(s) from {}".format(vec_path))
- self.hidden_dim = 256
- if device is None:
- self.dev = torch.device("cpu")
- else:
- self.dev = torch.device(device)
- if device == 'cpu' or device == torch.device("cpu") or device is None:
- providers = ['CPUExecutionProvider']
- elif device == 'cuda' or device == torch.device("cuda"):
- providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
- self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
-
- def encoder(self, wav):
- feats = wav
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- feats = feats.unsqueeze(0).cpu().detach().numpy()
- onnx_input = {self.model.get_inputs()[0].name: feats}
- logits = self.model.run(None, onnx_input)
- return torch.tensor(logits[0]).transpose(1, 2).to(self.dev)
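A hypothetical usage sketch for the encoder above; the ONNX path matches the default but still has to exist locally, and the input is random audio used purely to illustrate the expected shapes.

# Hypothetical sketch: extract content features from one second of random mono audio.
import torch

enc = ContentVec256L12_Onnx(vec_path="pretrain/vec-256-layer-12.onnx", device="cpu")
wav = torch.randn(16000)        # ~1 s of 16 kHz mono audio, shape [n_samples]
feats = enc.encoder(wav)        # expected shape: [1, 256, n_frames]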
diff --git a/spaces/HaoFeng2019/DocTr/app.py b/spaces/HaoFeng2019/DocTr/app.py
deleted file mode 100644
index f92a15d84882181937229f20857aa8931234285d..0000000000000000000000000000000000000000
--- a/spaces/HaoFeng2019/DocTr/app.py
+++ /dev/null
@@ -1,124 +0,0 @@
-from seg import U2NETP
-from GeoTr import GeoTr
-from IllTr import IllTr
-from inference_ill import rec_ill
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import skimage.io as io
-import numpy as np
-import cv2
-import glob
-import os
-from PIL import Image
-import argparse
-import warnings
-
-warnings.filterwarnings('ignore')
-
-import gradio as gr
-
-example_img_list = ['51_1 copy.png', '48_2 copy.png', '25.jpg']
-
-
-def reload_model(model, path=""):
- if not bool(path):
- return model
- else:
- model_dict = model.state_dict()
- pretrained_dict = torch.load(path, map_location='cpu')
- # print(len(pretrained_dict.keys()))
- pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
- # print(len(pretrained_dict.keys()))
- model_dict.update(pretrained_dict)
- model.load_state_dict(model_dict)
-
- return model
-
-
-def reload_segmodel(model, path=""):
- if not bool(path):
- return model
- else:
- model_dict = model.state_dict()
- pretrained_dict = torch.load(path, map_location='cpu')
- # print(len(pretrained_dict.keys()))
- pretrained_dict = {k[6:]: v for k, v in pretrained_dict.items() if k[6:] in model_dict}
- # print(len(pretrained_dict.keys()))
- model_dict.update(pretrained_dict)
- model.load_state_dict(model_dict)
-
- return model
-
-
-class GeoTr_Seg(nn.Module):
- def __init__(self):
- super(GeoTr_Seg, self).__init__()
- self.msk = U2NETP(3, 1)
- self.GeoTr = GeoTr(num_attn_layers=6)
-
- def forward(self, x):
- msk, _1, _2, _3, _4, _5, _6 = self.msk(x)
- msk = (msk > 0.5).float()
- x = msk * x
-
- bm = self.GeoTr(x)
- bm = (2 * (bm / 286.8) - 1) * 0.99
-
- return bm
-
-
-# Initialize models
-GeoTr_Seg_model = GeoTr_Seg()
-# IllTr_model = IllTr()
-
-# Load models only once
-reload_segmodel(GeoTr_Seg_model.msk, './model_pretrained/seg.pth')
-reload_model(GeoTr_Seg_model.GeoTr, './model_pretrained/geotr.pth')
-# reload_model(IllTr_model, './model_pretrained/illtr.pth')
-
-# Compile models (assuming PyTorch 2.0)
-GeoTr_Seg_model = torch.compile(GeoTr_Seg_model)
-
-
-# IllTr_model = torch.compile(IllTr_model)
-
-def process_image(input_image):
- GeoTr_Seg_model.eval()
- # IllTr_model.eval()
-
- im_ori = np.array(input_image)[:, :, :3] / 255.
- h, w, _ = im_ori.shape
- im = cv2.resize(im_ori, (288, 288))
- im = im.transpose(2, 0, 1)
- im = torch.from_numpy(im).float().unsqueeze(0)
-
- with torch.no_grad():
- bm = GeoTr_Seg_model(im)
- bm = bm.cpu()
- bm0 = cv2.resize(bm[0, 0].numpy(), (w, h))
- bm1 = cv2.resize(bm[0, 1].numpy(), (w, h))
- bm0 = cv2.blur(bm0, (3, 3))
- bm1 = cv2.blur(bm1, (3, 3))
- lbl = torch.from_numpy(np.stack([bm0, bm1], axis=2)).unsqueeze(0)
-
- out = F.grid_sample(torch.from_numpy(im_ori).permute(2, 0, 1).unsqueeze(0).float(), lbl, align_corners=True)
- img_geo = ((out[0] * 255).permute(1, 2, 0).numpy()).astype(np.uint8)
-
- ill_rec = False
-
- if ill_rec:
- img_ill = rec_ill(IllTr_model, img_geo)
- return Image.fromarray(img_ill)
- else:
- return Image.fromarray(img_geo)
-
-
-# Define Gradio interface
-input_image = gr.inputs.Image()
-output_image = gr.outputs.Image(type='pil')
-
-iface = gr.Interface(fn=process_image, inputs=input_image, outputs=output_image, title="DocTr",
- examples=example_img_list)
-iface.launch()
\ No newline at end of file
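The rectification pipeline above can also be exercised without the Gradio UI. The sketch below is hypothetical: it assumes the pretrained weights sit in ./model_pretrained/ and reuses one of the bundled example images as input.

# Hypothetical sketch: rectify a single document photo with process_image().
from PIL import Image

img = Image.open('51_1 copy.png').convert('RGB')   # one of the images in example_img_list
rectified = process_image(img)                     # geometric unwarping only (ill_rec is False)
rectified.save('rectified.png')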
diff --git a/spaces/Harsh502s/Autonomous_Text_Tagging_App/app.py b/spaces/Harsh502s/Autonomous_Text_Tagging_App/app.py
deleted file mode 100644
index be143f72160eb3368d97d07c44f982a9cb8b0bf6..0000000000000000000000000000000000000000
--- a/spaces/Harsh502s/Autonomous_Text_Tagging_App/app.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import streamlit as st
-from st_pages import Page, show_pages
-
-# Register the pages that appear in the sidebar (st_pages)
-show_pages(
- [
- Page(r"app.py", "Home", "🏠"),
- Page(r"Pages/Topic Model Results.py", "Topic Model Result", "📊"),
- Page(r"Pages/Models.py", "Models", "🤖"),
- Page(r"Pages/About.py", "About", "👋"),
- ]
-)
-
-st.set_page_config(
- page_title="Autonomous Text Tagging App",
- page_icon="📝",
- layout="wide",
- initial_sidebar_state="expanded",
-)
-
-
-# Display the main page of the app with instructions on how to use it
-def main():
- st.title("Autonomous Text Tagging App")
- cols = st.columns([1, 1])
- with st.container():
- with cols[0]:
- st.write(
-                "Text tagging is the process of adding metadata or labels to specific elements within a text, such as identifying and categorizing named entities, parts of speech, or sentiment."
- )
- st.write(
-                "This app shows the results of the BERTopic model and provides a demo of all the models used in this project."
- )
- st.subheader("How to use this app:")
- st.write("1. Select the model you want to use from the sidebar.")
- st.write("2. Enter the text you want to tag.")
- st.write('3. Click on the "Tag" button.')
- st.write("4. The tags will be displayed in the output section.")
- st.write("5. You can see the results of BERTopic Model in the sidebar.")
- st.write("6. You can use tabs to see the visualization of the results.")
- st.divider()
-
- with cols[1]:
- st.image("Sort.svg", width=450)
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/inference/transliterate.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/inference/transliterate.py
deleted file mode 100644
index de1ccab4426659552a019b593c4766522efff616..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/inference/transliterate.py
+++ /dev/null
@@ -1,919 +0,0 @@
-import torch
-import torch.nn as nn
-import numpy as np
-import pandas as pd
-import random
-import sys
-import os
-import json
-import enum
-import traceback
-import re
-
-#F_DIR = os.path.dirname(os.path.realpath(__file__))
-F_DIR = '/home/user/app/ttsv/checkpoints/'
-
-class XlitError(enum.Enum):
- lang_err = "Unsupported langauge ID requested ;( Please check available languages."
- string_err = "String passed is incompatable ;("
- internal_err = "Internal crash ;("
- unknown_err = "Unknown Failure"
- loading_err = "Loading failed ;( Check if metadata/paths are correctly configured."
-
-
-##=================== Network ==================================================
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- input_dim,
- embed_dim,
- hidden_dim,
- rnn_type="gru",
- layers=1,
- bidirectional=False,
- dropout=0,
- device="cpu",
- ):
- super(Encoder, self).__init__()
-
- self.input_dim = input_dim # src_vocab_sz
- self.enc_embed_dim = embed_dim
- self.enc_hidden_dim = hidden_dim
- self.enc_rnn_type = rnn_type
- self.enc_layers = layers
- self.enc_directions = 2 if bidirectional else 1
- self.device = device
-
- self.embedding = nn.Embedding(self.input_dim, self.enc_embed_dim)
-
- if self.enc_rnn_type == "gru":
- self.enc_rnn = nn.GRU(
- input_size=self.enc_embed_dim,
- hidden_size=self.enc_hidden_dim,
- num_layers=self.enc_layers,
- bidirectional=bidirectional,
- )
- elif self.enc_rnn_type == "lstm":
- self.enc_rnn = nn.LSTM(
- input_size=self.enc_embed_dim,
- hidden_size=self.enc_hidden_dim,
- num_layers=self.enc_layers,
- bidirectional=bidirectional,
- )
- else:
- raise Exception("XlitError: unknown RNN type mentioned")
-
- def forward(self, x, x_sz, hidden=None):
- """
- x_sz: (batch_size, 1) - Unpadded sequence lengths used for pack_pad
- """
- batch_sz = x.shape[0]
- # x: batch_size, max_length, enc_embed_dim
- x = self.embedding(x)
-
- ## pack the padded data
- # x: max_length, batch_size, enc_embed_dim -> for pack_pad
- x = x.permute(1, 0, 2)
- x = nn.utils.rnn.pack_padded_sequence(x, x_sz, enforce_sorted=False) # unpad
-
- # output: packed_size, batch_size, enc_embed_dim
- # hidden: n_layer**num_directions, batch_size, hidden_dim | if LSTM (h_n, c_n)
- output, hidden = self.enc_rnn(
- x
- ) # gru returns hidden state of all timesteps as well as hidden state at last timestep
-
- ## pad the sequence to the max length in the batch
- # output: max_length, batch_size, enc_emb_dim*directions)
- output, _ = nn.utils.rnn.pad_packed_sequence(output)
-
- # output: batch_size, max_length, hidden_dim
- output = output.permute(1, 0, 2)
-
- return output, hidden
-
- def get_word_embedding(self, x):
- """ """
- x_sz = torch.tensor([len(x)])
- x_ = torch.tensor(x).unsqueeze(0).to(dtype=torch.long)
- # x: 1, max_length, enc_embed_dim
- x = self.embedding(x_)
-
- ## pack the padded data
- # x: max_length, 1, enc_embed_dim -> for pack_pad
- x = x.permute(1, 0, 2)
- x = nn.utils.rnn.pack_padded_sequence(x, x_sz, enforce_sorted=False) # unpad
-
- # output: packed_size, 1, enc_embed_dim
- # hidden: n_layer**num_directions, 1, hidden_dim | if LSTM (h_n, c_n)
- output, hidden = self.enc_rnn(
- x
- ) # gru returns hidden state of all timesteps as well as hidden state at last timestep
-
- out_embed = hidden[0].squeeze()
-
- return out_embed
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- output_dim,
- embed_dim,
- hidden_dim,
- rnn_type="gru",
- layers=1,
- use_attention=True,
- enc_outstate_dim=None, # enc_directions * enc_hidden_dim
- dropout=0,
- device="cpu",
- ):
- super(Decoder, self).__init__()
-
- self.output_dim = output_dim # tgt_vocab_sz
- self.dec_hidden_dim = hidden_dim
- self.dec_embed_dim = embed_dim
- self.dec_rnn_type = rnn_type
- self.dec_layers = layers
- self.use_attention = use_attention
- self.device = device
- if self.use_attention:
- self.enc_outstate_dim = enc_outstate_dim if enc_outstate_dim else hidden_dim
- else:
- self.enc_outstate_dim = 0
-
- self.embedding = nn.Embedding(self.output_dim, self.dec_embed_dim)
-
- if self.dec_rnn_type == "gru":
- self.dec_rnn = nn.GRU(
- input_size=self.dec_embed_dim
- + self.enc_outstate_dim, # to concat attention_output
- hidden_size=self.dec_hidden_dim, # previous Hidden
- num_layers=self.dec_layers,
- batch_first=True,
- )
- elif self.dec_rnn_type == "lstm":
- self.dec_rnn = nn.LSTM(
- input_size=self.dec_embed_dim
- + self.enc_outstate_dim, # to concat attention_output
- hidden_size=self.dec_hidden_dim, # previous Hidden
- num_layers=self.dec_layers,
- batch_first=True,
- )
- else:
- raise Exception("XlitError: unknown RNN type mentioned")
-
- self.fc = nn.Sequential(
- nn.Linear(self.dec_hidden_dim, self.dec_embed_dim),
- nn.LeakyReLU(),
- # nn.Linear(self.dec_embed_dim, self.dec_embed_dim), nn.LeakyReLU(), # removing to reduce size
- nn.Linear(self.dec_embed_dim, self.output_dim),
- )
-
- ##----- Attention ----------
- if self.use_attention:
- self.W1 = nn.Linear(self.enc_outstate_dim, self.dec_hidden_dim)
- self.W2 = nn.Linear(self.dec_hidden_dim, self.dec_hidden_dim)
- self.V = nn.Linear(self.dec_hidden_dim, 1)
-
- def attention(self, x, hidden, enc_output):
- """
- x: (batch_size, 1, dec_embed_dim) -> after Embedding
- enc_output: batch_size, max_length, enc_hidden_dim *num_directions
- hidden: n_layers, batch_size, hidden_size | if LSTM (h_n, c_n)
- """
-
- ## perform addition to calculate the score
-
- # hidden_with_time_axis: batch_size, 1, hidden_dim
- ## hidden_with_time_axis = hidden.permute(1, 0, 2) ## replaced with below 2lines
- hidden_with_time_axis = (
- torch.sum(hidden, axis=0)
- if self.dec_rnn_type != "lstm"
- else torch.sum(hidden[0], axis=0)
- ) # h_n
-
- hidden_with_time_axis = hidden_with_time_axis.unsqueeze(1)
-
- # score: batch_size, max_length, hidden_dim
- score = torch.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis))
-
- # attention_weights: batch_size, max_length, 1
- # we get 1 at the last axis because we are applying score to self.V
- attention_weights = torch.softmax(self.V(score), dim=1)
-
- # context_vector shape after sum == (batch_size, hidden_dim)
- context_vector = attention_weights * enc_output
- context_vector = torch.sum(context_vector, dim=1)
- # context_vector: batch_size, 1, hidden_dim
- context_vector = context_vector.unsqueeze(1)
-
- # attend_out (batch_size, 1, dec_embed_dim + hidden_size)
- attend_out = torch.cat((context_vector, x), -1)
-
- return attend_out, attention_weights
-
- def forward(self, x, hidden, enc_output):
- """
- x: (batch_size, 1)
- enc_output: batch_size, max_length, dec_embed_dim
- hidden: n_layer, batch_size, hidden_size | lstm: (h_n, c_n)
- """
- if (hidden is None) and (self.use_attention is False):
- raise Exception(
- "XlitError: No use of a decoder with No attention and No Hidden"
- )
-
- batch_sz = x.shape[0]
-
- if hidden is None:
- # hidden: n_layers, batch_size, hidden_dim
- hid_for_att = torch.zeros(
- (self.dec_layers, batch_sz, self.dec_hidden_dim)
- ).to(self.device)
- elif self.dec_rnn_type == "lstm":
- hid_for_att = hidden[1] # c_n
-
- # x (batch_size, 1, dec_embed_dim) -> after embedding
- x = self.embedding(x)
-
- if self.use_attention:
- # x (batch_size, 1, dec_embed_dim + hidden_size) -> after attention
- # aw: (batch_size, max_length, 1)
- x, aw = self.attention(x, hidden, enc_output)
- else:
- x, aw = x, 0
-
- # passing the concatenated vector to the GRU
- # output: (batch_size, n_layers, hidden_size)
- # hidden: n_layers, batch_size, hidden_size | if LSTM (h_n, c_n)
- output, hidden = (
- self.dec_rnn(x, hidden) if hidden is not None else self.dec_rnn(x)
- )
-
- # output :shp: (batch_size * 1, hidden_size)
- output = output.view(-1, output.size(2))
-
- # output :shp: (batch_size * 1, output_dim)
- output = self.fc(output)
-
- return output, hidden, aw
-
-
-class Seq2Seq(nn.Module):
- """
- Class dependency: Encoder, Decoder
- """
-
- def __init__(
- self, encoder, decoder, pass_enc2dec_hid=False, dropout=0, device="cpu"
- ):
- super(Seq2Seq, self).__init__()
-
- self.encoder = encoder
- self.decoder = decoder
- self.device = device
- self.pass_enc2dec_hid = pass_enc2dec_hid
- _force_en2dec_hid_conv = False
-
- if self.pass_enc2dec_hid:
- assert (
- decoder.dec_hidden_dim == encoder.enc_hidden_dim
- ), "Hidden Dimension of encoder and decoder must be same, or unset `pass_enc2dec_hid`"
- if decoder.use_attention:
- assert (
- decoder.enc_outstate_dim
- == encoder.enc_directions * encoder.enc_hidden_dim
- ), "Set `enc_out_dim` correctly in decoder"
- assert (
- self.pass_enc2dec_hid or decoder.use_attention
- ), "No use of a decoder with No attention and No Hidden from Encoder"
-
- self.use_conv_4_enc2dec_hid = False
- if (
- self.pass_enc2dec_hid
- and (encoder.enc_directions * encoder.enc_layers != decoder.dec_layers)
- ) or _force_en2dec_hid_conv:
-            if encoder.enc_rnn_type == "lstm" or decoder.dec_rnn_type == "lstm":
- raise Exception(
- "XlitError: conv for enc2dec_hid not implemented; Change the layer numbers appropriately"
- )
-
- self.use_conv_4_enc2dec_hid = True
- self.enc_hid_1ax = encoder.enc_directions * encoder.enc_layers
- self.dec_hid_1ax = decoder.dec_layers
- self.e2d_hidden_conv = nn.Conv1d(self.enc_hid_1ax, self.dec_hid_1ax, 1)
-
- def enc2dec_hidden(self, enc_hidden):
- """
- enc_hidden: n_layer, batch_size, hidden_dim*num_directions
-        TODO: Implement the logic for LSTM-based models
- """
- # hidden: batch_size, enc_layer*num_directions, enc_hidden_dim
- hidden = enc_hidden.permute(1, 0, 2).contiguous()
- # hidden: batch_size, dec_layers, dec_hidden_dim -> [N,C,Tstep]
- hidden = self.e2d_hidden_conv(hidden)
-
- # hidden: dec_layers, batch_size , dec_hidden_dim
- hidden_for_dec = hidden.permute(1, 0, 2).contiguous()
-
- return hidden_for_dec
-
- def active_beam_inference(self, src, beam_width=3, max_tgt_sz=50):
- """Search based decoding
- src: (sequence_len)
- """
-
- def _avg_score(p_tup):
- """Used for Sorting
- TODO: Dividing by length of sequence power alpha as hyperparam
- """
- return p_tup[0]
-
- import sys
-
- batch_size = 1
- start_tok = src[0]
- end_tok = src[-1]
- src_sz = torch.tensor([len(src)])
- src_ = src.unsqueeze(0)
-
- # enc_output: (batch_size, padded_seq_length, enc_hidden_dim*num_direction)
- # enc_hidden: (enc_layers*num_direction, batch_size, hidden_dim)
- enc_output, enc_hidden = self.encoder(src_, src_sz)
-
- if self.pass_enc2dec_hid:
- # dec_hidden: dec_layers, batch_size , dec_hidden_dim
- if self.use_conv_4_enc2dec_hid:
- init_dec_hidden = self.enc2dec_hidden(enc_hidden)
- else:
- init_dec_hidden = enc_hidden
- else:
- # dec_hidden -> Will be initialized to zeros internally
- init_dec_hidden = None
-
- # top_pred[][0] = Σ-log_softmax
- # top_pred[][1] = sequence torch.tensor shape: (1)
- # top_pred[][2] = dec_hidden
- top_pred_list = [(0, start_tok.unsqueeze(0), init_dec_hidden)]
-
- for t in range(max_tgt_sz):
- cur_pred_list = []
-
- for p_tup in top_pred_list:
- if p_tup[1][-1] == end_tok:
- cur_pred_list.append(p_tup)
- continue
-
- # dec_hidden: dec_layers, 1, hidden_dim
- # dec_output: 1, output_dim
- dec_output, dec_hidden, _ = self.decoder(
- x=p_tup[1][-1].view(1, 1), # dec_input: (1,1)
- hidden=p_tup[2],
- enc_output=enc_output,
- )
-
- ## π{prob} = Σ{log(prob)} -> to prevent diminishing
- # dec_output: (1, output_dim)
- dec_output = nn.functional.log_softmax(dec_output, dim=1)
- # pred_topk.values & pred_topk.indices: (1, beam_width)
- pred_topk = torch.topk(dec_output, k=beam_width, dim=1)
-
- for i in range(beam_width):
- sig_logsmx_ = p_tup[0] + pred_topk.values[0][i]
- # seq_tensor_ : (seq_len)
- seq_tensor_ = torch.cat((p_tup[1], pred_topk.indices[0][i].view(1)))
-
- cur_pred_list.append((sig_logsmx_, seq_tensor_, dec_hidden))
-
- cur_pred_list.sort(key=_avg_score, reverse=True) # Maximized order
- top_pred_list = cur_pred_list[:beam_width]
-
- # check if end_tok of all topk
- end_flags_ = [1 if t[1][-1] == end_tok else 0 for t in top_pred_list]
- if beam_width == sum(end_flags_):
- break
-
- pred_tnsr_list = [t[1] for t in top_pred_list]
-
- return pred_tnsr_list
-
-
-##===================== Glyph handlers =======================================
-
-
-class GlyphStrawboss:
- def __init__(self, glyphs="en"):
- """list of letters in a language in unicode
- lang: ISO Language code
- glyphs: json file with script information
- """
- if glyphs == "en":
- # Smallcase alone
- self.glyphs = [chr(alpha) for alpha in range(97, 122 + 1)]
- else:
- self.dossier = json.load(open(glyphs, encoding="utf-8"))
- self.glyphs = self.dossier["glyphs"]
- self.numsym_map = self.dossier["numsym_map"]
-
- self.char2idx = {}
- self.idx2char = {}
- self._create_index()
-
- def _create_index(self):
-
- self.char2idx["_"] = 0 # pad
- self.char2idx["$"] = 1 # start
- self.char2idx["#"] = 2 # end
- self.char2idx["*"] = 3 # Mask
- self.char2idx["'"] = 4 # apostrophe U+0027
- self.char2idx["%"] = 5 # unused
- self.char2idx["!"] = 6 # unused
-
- # letter to index mapping
- for idx, char in enumerate(self.glyphs):
- self.char2idx[char] = idx + 7 # +7 token initially
-
- # index to letter mapping
- for char, idx in self.char2idx.items():
- self.idx2char[idx] = char
-
- def size(self):
- return len(self.char2idx)
-
- def word2xlitvec(self, word):
-        """Converts a given string of glyphs (word) to a numpy vector.
- Also adds tokens for start and end
- """
- try:
- vec = [self.char2idx["$"]] # start token
- for i in list(word):
- vec.append(self.char2idx[i])
- vec.append(self.char2idx["#"]) # end token
-
- vec = np.asarray(vec, dtype=np.int64)
- return vec
-
- except Exception as error:
- print("XlitError: In word:", word, "Error Char not in Token:", error)
- sys.exit()
-
- def xlitvec2word(self, vector):
- """Converts vector(numpy) to string of glyphs(word)"""
- char_list = []
- for i in vector:
- char_list.append(self.idx2char[i])
-
- word = "".join(char_list).replace("$", "").replace("#", "") # remove tokens
- word = word.replace("_", "").replace("*", "") # remove tokens
- return word
-
-
-class VocabSanitizer:
- def __init__(self, data_file):
- """
-        data_file: path to file containing vocabulary list
- """
- extension = os.path.splitext(data_file)[-1]
- if extension == ".json":
- self.vocab_set = set(json.load(open(data_file, encoding="utf-8")))
- elif extension == ".csv":
- self.vocab_df = pd.read_csv(data_file).set_index("WORD")
- self.vocab_set = set(self.vocab_df.index)
- else:
- print("XlitError: Only Json/CSV file extension supported")
-
- def reposition(self, word_list):
- """Reorder Words in list"""
- new_list = []
- temp_ = word_list.copy()
- for v in word_list:
- if v in self.vocab_set:
- new_list.append(v)
- temp_.remove(v)
- new_list.extend(temp_)
-
- return new_list
-
-
-##=============== INSTANTIATION ================================================
-
-
-class XlitPiston:
- """
- For handling prediction & post-processing of transliteration for a single language
- Class dependency: Seq2Seq, GlyphStrawboss, VocabSanitizer
- Global Variables: F_DIR
- """
-
- def __init__(
- self,
- weight_path,
- vocab_file,
- tglyph_cfg_file,
- iglyph_cfg_file="en",
- device="cpu",
- ):
-
- self.device = device
- self.in_glyph_obj = GlyphStrawboss(iglyph_cfg_file)
- self.tgt_glyph_obj = GlyphStrawboss(glyphs=tglyph_cfg_file)
- self.voc_sanity = VocabSanitizer(vocab_file)
-
- self._numsym_set = set(
- json.load(open(tglyph_cfg_file, encoding="utf-8"))["numsym_map"].keys()
- )
- self._inchar_set = set("abcdefghijklmnopqrstuvwxyz")
- self._natscr_set = set().union(
- self.tgt_glyph_obj.glyphs, sum(self.tgt_glyph_obj.numsym_map.values(), [])
- )
-
-        ## Static model config. TODO: add support for defining these values in JSON
- input_dim = self.in_glyph_obj.size()
- output_dim = self.tgt_glyph_obj.size()
- enc_emb_dim = 300
- dec_emb_dim = 300
- enc_hidden_dim = 512
- dec_hidden_dim = 512
- rnn_type = "lstm"
- enc2dec_hid = True
- attention = True
- enc_layers = 1
- dec_layers = 2
- m_dropout = 0
- enc_bidirect = True
- enc_outstate_dim = enc_hidden_dim * (2 if enc_bidirect else 1)
-
- enc = Encoder(
- input_dim=input_dim,
- embed_dim=enc_emb_dim,
- hidden_dim=enc_hidden_dim,
- rnn_type=rnn_type,
- layers=enc_layers,
- dropout=m_dropout,
- device=self.device,
- bidirectional=enc_bidirect,
- )
- dec = Decoder(
- output_dim=output_dim,
- embed_dim=dec_emb_dim,
- hidden_dim=dec_hidden_dim,
- rnn_type=rnn_type,
- layers=dec_layers,
- dropout=m_dropout,
- use_attention=attention,
- enc_outstate_dim=enc_outstate_dim,
- device=self.device,
- )
- self.model = Seq2Seq(enc, dec, pass_enc2dec_hid=enc2dec_hid, device=self.device)
- self.model = self.model.to(self.device)
- weights = torch.load(weight_path, map_location=torch.device(self.device))
-
- self.model.load_state_dict(weights)
- self.model.eval()
-
- def character_model(self, word, beam_width=1):
- in_vec = torch.from_numpy(self.in_glyph_obj.word2xlitvec(word)).to(self.device)
- ## change to active or passive beam
- p_out_list = self.model.active_beam_inference(in_vec, beam_width=beam_width)
- p_result = [
- self.tgt_glyph_obj.xlitvec2word(out.cpu().numpy()) for out in p_out_list
- ]
-
- result = self.voc_sanity.reposition(p_result)
-
- # List type
- return result
-
- def numsym_model(self, seg):
- """tgt_glyph_obj.numsym_map[x] returns a list object"""
- if len(seg) == 1:
- return [seg] + self.tgt_glyph_obj.numsym_map[seg]
-
- a = [self.tgt_glyph_obj.numsym_map[n][0] for n in seg]
- return [seg] + ["".join(a)]
-
- def _word_segementer(self, sequence):
-
- sequence = sequence.lower()
- accepted = set().union(self._numsym_set, self._inchar_set, self._natscr_set)
- # sequence = ''.join([i for i in sequence if i in accepted])
-
- segment = []
- idx = 0
- seq_ = list(sequence)
- while len(seq_):
- # for Number-Symbol
- temp = ""
- while len(seq_) and seq_[0] in self._numsym_set:
- temp += seq_[0]
- seq_.pop(0)
- if temp != "":
- segment.append(temp)
-
- # for Target Chars
- temp = ""
- while len(seq_) and seq_[0] in self._natscr_set:
- temp += seq_[0]
- seq_.pop(0)
- if temp != "":
- segment.append(temp)
-
- # for Input-Roman Chars
- temp = ""
- while len(seq_) and seq_[0] in self._inchar_set:
- temp += seq_[0]
- seq_.pop(0)
- if temp != "":
- segment.append(temp)
-
- temp = ""
- while len(seq_) and seq_[0] not in accepted:
- temp += seq_[0]
- seq_.pop(0)
- if temp != "":
- segment.append(temp)
-
- return segment
-
- def inferencer(self, sequence, beam_width=10):
-
- seg = self._word_segementer(sequence[:120])
- lit_seg = []
-
- p = 0
- while p < len(seg):
- if seg[p][0] in self._natscr_set:
- lit_seg.append([seg[p]])
- p += 1
-
- elif seg[p][0] in self._inchar_set:
- lit_seg.append(self.character_model(seg[p], beam_width=beam_width))
- p += 1
-
- elif seg[p][0] in self._numsym_set: # num & punc
- lit_seg.append(self.numsym_model(seg[p]))
- p += 1
- else:
- lit_seg.append([seg[p]])
- p += 1
-
-        ## If there are two or fewer segments, return the combinatorial results;
-        ## otherwise only return the top-1 of each segment, concatenated
- if len(lit_seg) == 1:
- final_result = lit_seg[0]
-
- elif len(lit_seg) == 2:
- final_result = [""]
- for seg in lit_seg:
- new_result = []
- for s in seg:
- for f in final_result:
- new_result.append(f + s)
- final_result = new_result
-
- else:
- new_result = []
- for seg in lit_seg:
- new_result.append(seg[0])
- final_result = ["".join(new_result)]
-
- return final_result
-
-
-from collections.abc import Iterable
-from pydload import dload
-import zipfile
-
-MODEL_DOWNLOAD_URL_PREFIX = "https://github.com/AI4Bharat/IndianNLP-Transliteration/releases/download/xlit_v0.5.0/"
-
-
-def is_folder_writable(folder):
- try:
- os.makedirs(folder, exist_ok=True)
- tmp_file = os.path.join(folder, ".write_test")
- with open(tmp_file, "w") as f:
- f.write("Permission Check")
- os.remove(tmp_file)
- return True
- except:
- return False
-
-
-def is_directory_writable(path):
- if os.name == "nt":
- return is_folder_writable(path)
- return os.access(path, os.W_OK | os.X_OK)
-
-
-class XlitEngine:
- """
- For Managing the top level tasks and applications of transliteration
- Global Variables: F_DIR
- """
-
- def __init__(
- self, lang2use="all", config_path="translit_models/default_lineup.json"
- ):
-
- lineup = json.load(open(os.path.join(F_DIR, config_path), encoding="utf-8"))
- self.lang_config = {}
- if isinstance(lang2use, str):
- if lang2use == "all":
- self.lang_config = lineup
- elif lang2use in lineup:
- self.lang_config[lang2use] = lineup[lang2use]
- else:
- raise Exception(
-                    "XlitError: The entered language code was not found. Available codes: {}".format(
- lineup.keys()
- )
- )
-
- elif isinstance(lang2use, Iterable):
- for l in lang2use:
- try:
- self.lang_config[l] = lineup[l]
- except:
- print(
- "XlitError: Language code {} not found, Skipping...".format(l)
- )
- else:
- raise Exception(
-                "XlitError: lang2use must be a list of language codes or a string containing a single language code"
- )
-
- if is_directory_writable(F_DIR):
- models_path = os.path.join(F_DIR, "translit_models")
- else:
- user_home = os.path.expanduser("~")
- models_path = os.path.join(user_home, ".AI4Bharat_Xlit_Models")
- os.makedirs(models_path, exist_ok=True)
- self.download_models(models_path)
-
- self.langs = {}
- self.lang_model = {}
- for la in self.lang_config:
- try:
- print("Loading {}...".format(la))
- self.lang_model[la] = XlitPiston(
- weight_path=os.path.join(
- models_path, self.lang_config[la]["weight"]
- ),
- vocab_file=os.path.join(models_path, self.lang_config[la]["vocab"]),
- tglyph_cfg_file=os.path.join(
- models_path, self.lang_config[la]["script"]
- ),
- iglyph_cfg_file="en",
- )
- self.langs[la] = self.lang_config[la]["name"]
- except Exception as error:
- print("XlitError: Failure in loading {} \n".format(la), error)
- print(XlitError.loading_err.value)
-
- def download_models(self, models_path):
- """
- Download models from GitHub Releases if not exists
- """
- for l in self.lang_config:
- lang_name = self.lang_config[l]["eng_name"]
- lang_model_path = os.path.join(models_path, lang_name)
- if not os.path.isdir(lang_model_path):
- print("Downloading model for language: %s" % lang_name)
- remote_url = MODEL_DOWNLOAD_URL_PREFIX + lang_name + ".zip"
- downloaded_zip_path = os.path.join(models_path, lang_name + ".zip")
- dload(url=remote_url, save_to_path=downloaded_zip_path, max_time=None)
-
- if not os.path.isfile(downloaded_zip_path):
- exit(
- f"ERROR: Unable to download model from {remote_url} into {models_path}"
- )
-
- with zipfile.ZipFile(downloaded_zip_path, "r") as zip_ref:
- zip_ref.extractall(models_path)
-
- if os.path.isdir(lang_model_path):
- os.remove(downloaded_zip_path)
- else:
- exit(
- f"ERROR: Unable to find models in {lang_model_path} after download"
- )
- return
-
- def translit_word(self, eng_word, lang_code="default", topk=7, beam_width=10):
- if eng_word == "":
- return []
-
- if lang_code in self.langs:
- try:
- res_list = self.lang_model[lang_code].inferencer(
- eng_word, beam_width=beam_width
- )
- return res_list[:topk]
-
- except Exception as error:
- print("XlitError:", traceback.format_exc())
- print(XlitError.internal_err.value)
- return XlitError.internal_err
-
- elif lang_code == "default":
- try:
- res_dict = {}
- for la in self.lang_model:
- res = self.lang_model[la].inferencer(
- eng_word, beam_width=beam_width
- )
- res_dict[la] = res[:topk]
- return res_dict
-
- except Exception as error:
- print("XlitError:", traceback.format_exc())
- print(XlitError.internal_err.value)
- return XlitError.internal_err
-
- else:
-            print("XlitError: Unknown language requested", lang_code)
- print(XlitError.lang_err.value)
- return XlitError.lang_err
-
- def translit_sentence(self, eng_sentence, lang_code="default", beam_width=10):
- if eng_sentence == "":
- return []
-
- if lang_code in self.langs:
- try:
- out_str = ""
- for word in eng_sentence.split():
- res_ = self.lang_model[lang_code].inferencer(
- word, beam_width=beam_width
- )
- out_str = out_str + res_[0] + " "
- return out_str[:-1]
-
- except Exception as error:
- print("XlitError:", traceback.format_exc())
- print(XlitError.internal_err.value)
- return XlitError.internal_err
-
- elif lang_code == "default":
- try:
- res_dict = {}
- for la in self.lang_model:
- out_str = ""
- for word in eng_sentence.split():
- res_ = self.lang_model[la].inferencer(
- word, beam_width=beam_width
- )
- out_str = out_str + res_[0] + " "
- res_dict[la] = out_str[:-1]
- return res_dict
-
- except Exception as error:
- print("XlitError:", traceback.format_exc())
- print(XlitError.internal_err.value)
- return XlitError.internal_err
-
- else:
-            print("XlitError: Unknown language requested", lang_code)
- print(XlitError.lang_err.value)
- return XlitError.lang_err
-
-
-if __name__ == "__main__":
-
- available_lang = [
- "bn",
- "gu",
- "hi",
- "kn",
- "gom",
- "mai",
- "ml",
- "mr",
- "pa",
- "sd",
- "si",
- "ta",
- "te",
- "ur",
- ]
-
- reg = re.compile(r"[a-zA-Z]")
- lang = "hi"
- engine = XlitEngine(
- lang
- ) # if you don't specify lang code here, this will give results in all langs available
- sent = "Hello World! ABCD क्या हाल है आपका?"
- words = [
- engine.translit_word(word, topk=1)[lang][0] if reg.match(word) else word
- for word in sent.split()
- ] # only transliterated en words, leaves rest as it is
- updated_sent = " ".join(words)
-
- print(updated_sent)
-
- # output : हेलो वर्ल्ड! क्या हाल है आपका?
-
- # y = engine.translit_sentence("Hello World !")['hi']
- # print(y)
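The __main__ block above only exercises the word-level API. A hypothetical word- and sentence-level call with the same engine (model files are downloaded on first use) would look like this:

# Hypothetical sketch: transliterate a word and a sentence to Hindi.
engine = XlitEngine("hi")
print(engine.translit_word("namaste", lang_code="hi", topk=3))
print(engine.translit_sentence("namaste duniya", lang_code="hi"))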
diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/inference/advanced_tts.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/inference/advanced_tts.py
deleted file mode 100644
index 6f8e2f5870e0f7dcd28c35c71cde58de6f1ae415..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/inference/advanced_tts.py
+++ /dev/null
@@ -1,155 +0,0 @@
-
-from .tts import TextToMel, MelToWav
-from .transliterate import XlitEngine
-from .num_to_word_on_sent import normalize_nums
-
-import re
-import numpy as np
-from scipy.io.wavfile import write
-
-from mosestokenizer import *
-from indicnlp.tokenize import sentence_tokenize
-import argparse
-
-_INDIC = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
-_PURAM_VIRAM_LANGUAGES = ["hi", "or", "bn", "as"]
-_TRANSLITERATION_NOT_AVAILABLE_IN = ["en","or"]
-#_NUM2WORDS_NOT_AVAILABLE_IN = []
-
-def normalize_text(text, lang):
- if lang in _PURAM_VIRAM_LANGUAGES:
- text = text.replace('|', '।')
- text = text.replace('.', '।')
- return text
-
-def split_sentences(paragraph, language):
- if language == "en":
- with MosesSentenceSplitter(language) as splitter:
- return splitter([paragraph])
- elif language in _INDIC:
- return sentence_tokenize.sentence_split(paragraph, lang=language)
-
-
-
-def load_models(acoustic, vocoder, device):
- text_to_mel = TextToMel(glow_model_dir=acoustic, device=device)
- mel_to_wav = MelToWav(hifi_model_dir=vocoder, device=device)
- return text_to_mel, mel_to_wav
-
-
-def translit(text, lang):
- reg = re.compile(r'[a-zA-Z]')
- words = [engine.translit_word(word, topk=1)[lang][0] if reg.match(word) else word for word in text.split()]
- updated_sent = ' '.join(words)
- return updated_sent
-
-
-
-def run_tts(text, lang, args):
- if lang == 'hi':
- text = text.replace('।', '.') # only for hindi models
-
- if lang == 'en' and text[-1] != '.':
- text = text + '. '
-
- if args.number_conversion == 1 and lang!='en':
- print("Doing number conversion")
- text_num_to_word = normalize_nums(text, lang) # converting numbers to words in lang
- else:
- text_num_to_word = text
-
-
- if args.transliteration == 1 and lang not in _TRANSLITERATION_NOT_AVAILABLE_IN:
- print("Doing transliteration")
- text_num_to_word_and_transliterated = translit(text_num_to_word, lang) # transliterating english words to lang
- else:
- text_num_to_word_and_transliterated = text_num_to_word
-
- final_text = ' ' + text_num_to_word_and_transliterated
- print(final_text)
- mel = text_to_mel.generate_mel(final_text, args.noise_scale, args.length_scale)
- audio, sr = mel_to_wav.generate_wav(mel)
- return sr, audio
-
-def run_tts_paragraph(args):
- audio_list = []
-
- global text_to_mel
- global mel_to_wav
-
- if args.gender == 'Male':
- text_to_mel = text_to_mel_list[1]
- mel_to_wav = mel_to_wav_list[1]
- else:
- text_to_mel = text_to_mel_list[0]
- mel_to_wav = mel_to_wav_list[0]
-
-
- if args.split_sentences == 1:
- text = normalize_text(args.text, args.lang)
- split_sentences_list = split_sentences(text, args.lang)
-
- for sent in split_sentences_list:
-
- sr, audio = run_tts(sent, args.lang, args)
- audio_list.append(audio)
-
- concatenated_audio = np.concatenate([i for i in audio_list])
- if args.wav:
- write(filename=args.wav, rate=sr, data=concatenated_audio)
- return (sr, concatenated_audio)
- else:
- sr, audio = run_tts(args.text, args.lang, args)
- if args.wav:
- write(filename=args.wav, rate=sr, data=audio)
- return (sr, audio)
-
-
-def load_all_models(args):
- global engine
- if args.lang not in _TRANSLITERATION_NOT_AVAILABLE_IN:
- engine = XlitEngine(args.lang) # loading translit model globally
-
- global text_to_mel_list
- global mel_to_wav_list
-
-
- text_to_mel_list = []
- mel_to_wav_list = []
-
- for acoustic, vocoder in zip( args.acoustic.split(',') , args.vocoder.split(',') ):
- ttm, mtw = load_models(acoustic, vocoder, args.device)
- text_to_mel_list.append(ttm)
- mel_to_wav_list.append(mtw)
-
- try:
- args.noise_scale = float(args.noise_scale)
- args.length_scale = float(args.length_scale)
- except:
- pass
-
- print(args)
-
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("-a", "--acoustic", required=True, type=str)
- parser.add_argument("-v", "--vocoder", required=True, type=str)
- parser.add_argument("-d", "--device", type=str, default="cpu")
- parser.add_argument("-t", "--text", type=str, required=True)
- parser.add_argument("-w", "--wav", type=str, required=True)
- parser.add_argument("-n", "--noise-scale", default='0.667', type=str )
- parser.add_argument("-l", "--length-scale", default='1.0', type=str)
-
- parser.add_argument("-T", "--transliteration", default=1, type=int)
- parser.add_argument("-N", "--number-conversion", default=1, type=int)
- parser.add_argument("-S", "--split-sentences", default=1, type=int)
- parser.add_argument("-L", "--lang", type=str, required=True)
-
- args = parser.parse_args()
-
- load_all_models(args)
- run_tts_paragraph(args)
-
-
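When this module is driven programmatically (as the Gradio spaces do) instead of through argparse, the same entry points can be fed a plain namespace. The sketch below is hypothetical; the checkpoint paths are placeholders, and the female/male ordering simply mirrors how run_tts_paragraph() indexes the loaded model lists.

# Hypothetical sketch: drive the TTS helpers above with an in-code namespace.
from argparse import Namespace

args = Namespace(
    acoustic='checkpoints/glow_female,checkpoints/glow_male',  # placeholder checkpoint dirs
    vocoder='checkpoints/hifi_female,checkpoints/hifi_male',
    device='cpu', lang='hi', gender='Female',
    text='नमस्ते दुनिया', wav='out.wav',
    noise_scale='0.667', length_scale='1.0',
    transliteration=1, number_conversion=1, split_sentences=1)

load_all_models(args)
sr, audio = run_tts_paragraph(args)   # also writes out.wav when args.wav is set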
diff --git a/spaces/Harveenchadha/oiTrans/indic_nlp_library/docs/Makefile b/spaces/Harveenchadha/oiTrans/indic_nlp_library/docs/Makefile
deleted file mode 100644
index faf86259fdbcb0dff091c22d980623b622f2bbd4..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/oiTrans/indic_nlp_library/docs/Makefile
+++ /dev/null
@@ -1,153 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
-BUILDDIR = _build
-
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
- @echo " html to make standalone HTML files"
- @echo " dirhtml to make HTML files named index.html in directories"
- @echo " singlehtml to make a single large HTML file"
- @echo " pickle to make pickle files"
- @echo " json to make JSON files"
- @echo " htmlhelp to make HTML files and a HTML help project"
- @echo " qthelp to make HTML files and a qthelp project"
- @echo " devhelp to make HTML files and a Devhelp project"
- @echo " epub to make an epub"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
- @echo " latexpdf to make LaTeX files and run them through pdflatex"
- @echo " text to make text files"
- @echo " man to make manual pages"
- @echo " texinfo to make Texinfo files"
- @echo " info to make Texinfo files and run them through makeinfo"
- @echo " gettext to make PO message catalogs"
- @echo " changes to make an overview of all changed/added/deprecated items"
- @echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
-
-clean:
- -rm -rf $(BUILDDIR)/*
-
-html:
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
- $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
- @echo
- @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
- @echo
- @echo "Build finished; now you can process the pickle files."
-
-json:
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
- @echo
- @echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
- @echo
- @echo "Build finished; now you can run HTML Help Workshop with the" \
- ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
- $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
- @echo
- @echo "Build finished; now you can run "qcollectiongenerator" with the" \
- ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/IndicNLPLibrary.qhcp"
- @echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/IndicNLPLibrary.qhc"
-
-devhelp:
- $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
- @echo
- @echo "Build finished."
- @echo "To view the help file:"
- @echo "# mkdir -p $$HOME/.local/share/devhelp/IndicNLPLibrary"
- @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/IndicNLPLibrary"
- @echo "# devhelp"
-
-epub:
- $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
- @echo
- @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo
- @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
- @echo "Run \`make' in that directory to run these through (pdf)latex" \
- "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo "Running LaTeX files through pdflatex..."
- $(MAKE) -C $(BUILDDIR)/latex all-pdf
- @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
- $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
- @echo
- @echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
- $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
- @echo
- @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo
- @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
- @echo "Run \`make' in that directory to run these through makeinfo" \
- "(use \`make info' here to do that automatically)."
-
-info:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo "Running Texinfo files through makeinfo..."
- make -C $(BUILDDIR)/texinfo info
- @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
- $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
- @echo
- @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
- @echo
- @echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
- @echo
- @echo "Link check complete; look for any errors in the above output " \
- "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
- @echo "Testing of doctests in the sources finished, look at the " \
- "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/spaces/Hexamind/GDOC/src/model/container.py b/spaces/Hexamind/GDOC/src/model/container.py
deleted file mode 100644
index 29f64744b670da5977a09124a965da92812c2781..0000000000000000000000000000000000000000
--- a/spaces/Hexamind/GDOC/src/model/container.py
+++ /dev/null
@@ -1,143 +0,0 @@
-from src.model.paragraph import Paragraph
-from src.model.block import Block
-
-INFINITE = 99999
-
-
-class Container:
-
- def __init__(self, paragraphs: [Paragraph], title: Paragraph = None, level: int = 0, index: [int] = None,
- father=None, id_=0):
- if index is None:
- index = []
- self.level = level
- self.title = title
- self.paragraphs = []
- self.children = []
- self.index = index
- self.father = father # if not father, then the container is at the top of the hierarchy
- self.id_ = int(str(1) + str(father.id_) + str(id_))
- if paragraphs:
- self.paragraphs, self.children = self.create_children(paragraphs, level, index)
- self.blocks = self.get_blocks()
- self.normals, self.comments, self.tasks = self.sort_paragraphs()
-
-
- @property
- def text(self):
- text = ""
- if self.title:
- text = "Titre " + str(self.level) + " : " + self.title.text + '\n'
- for p in self.paragraphs:
- text += p.text + '\n'
- for child in self.children:
- text += child.text
- return text
-
- @property
- def text_chunks(self, chunk=500):
- text_chunks = []
- text_chunk = ""
- for p in self.paragraphs:
- if chunk < len(text_chunk) + len(p.text):
- text_chunks.append(text_chunk)
- text_chunk = ""
- else:
- text_chunk += " " + p.text
- if text_chunk and not text_chunk.isspace():
- text_chunks.append(text_chunk)
- for child in self.children:
- text_chunks += child.text_chunks
- return text_chunks
-
- def get_blocks(self):
- block = Block(level=self.level, index=self.index)
- if self.title:
- block.title = self.title.text
- for p in self.paragraphs:
- if not p.blank:
- if p.text.startswith('##### '):
- special_action = p.text.lstrip('##### ')
- block.specials.append(special_action)
- else:
- block.content += p.text
- blocks = [block] if block.content or block.specials else []
- for child in self.children:
- blocks += child.blocks
- return blocks
-
- def create_children(self, paragraphs: Paragraph, level: int, index: [int]) -> ([Paragraph], []):
- """
- creates children containers or directly attached content
- and returns the list of containers and contents of level+1
- :return:
- [Content or Container]
- """
- attached_paragraphs = []
- container_paragraphs = []
- container_title = None
- children = []
- in_children = False
- child_id = 0
- level = INFINITE
-
- while paragraphs:
- p = paragraphs.pop(0)
- if not in_children and not p.is_structure:
- attached_paragraphs.append(p)
- else:
- in_children = True
- if p.is_structure and p.level <= level: # if p is higher in hierarchy, then the child is completed
- if container_paragraphs or container_title:
- if level <= len(index):
- index = index[:level]
- index[-1] += 1
- else:
- for i in range(level-len(index)):
- index.append(1)
- children.append(Container(container_paragraphs, container_title, level, index, self, child_id))
- child_id += 1
- container_paragraphs = []
- container_title = p
- level = p.level
-
- else: # p is normal text or strictly lower in hierarchy, then the child continues to grow
- container_paragraphs.append(p)
-
- if container_paragraphs or container_title:
- if level <= len(index):
- index = index[:level]
- index[-1] += 1
- else:
- for i in range(level - len(index)):
- index.append(1)
- children.append(Container(container_paragraphs, container_title, level, index, self, child_id))
- child_id += 1
-
- return attached_paragraphs, children
-
- @property
- def structure(self):
-
- self_structure = {str(self.id_): {
- 'index': str(self.id_),
- 'canMove': True,
- 'isFolder': True,
- 'children': [p.id_ for p in self.paragraphs] + [child.id_ for child in self.children],
- 'canRename': True,
- 'data': {},
- 'level': self.level,
- 'rank': self.rank,
- 'title': self.title.text if self.title else 'root'
- }}
- paragraphs_structure = [p.structure for p in self.paragraphs]
- structure = [self_structure] + paragraphs_structure
- for child in self.children:
- structure += child.structure
- return structure
-
- def sort_paragraphs(self) -> ([Paragraph], [Paragraph], [Paragraph]):
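-        # Bucket this container's paragraphs by type: normal text, comments and tasks.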
- mapping = {'normal': [], 'comment': [], 'task': []}
- for p in self.paragraphs:
-            mapping[p.type].append(p)
- return mapping['normal'], mapping['comment'], mapping['task']
diff --git a/spaces/HighCWu/starganv2vc-paddle/starganv2vc_paddle/transforms.py b/spaces/HighCWu/starganv2vc-paddle/starganv2vc_paddle/transforms.py
deleted file mode 100644
index e8fad6006dda710c670902d32e64076db562db5e..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/starganv2vc-paddle/starganv2vc_paddle/transforms.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import paddle
-from paddle import nn
-import paddle.nn.functional as F
-import paddleaudio
-import paddleaudio.functional as audio_F
-import random
-
-## 1. RandomTimeStrech
-
-class TimeStrech(nn.Layer):
- def __init__(self, scale):
- super(TimeStrech, self).__init__()
- self.scale = scale
-
- def forward(self, x):
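-        # Stretch the mel spectrogram along the time axis by `scale`, then either crop it back
-        # or pad it with a random slice of itself so the frame count stays unchanged.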
- mel_size = x.shape[-1]
-
- x = F.interpolate(x, scale_factor=(1, self.scale), align_corners=False,
- mode='bilinear').squeeze()
-
- if x.shape[-1] < mel_size:
- noise_length = (mel_size - x.shape[-1])
- random_pos = random.randint(0, x.shape[-1]) - noise_length
- if random_pos < 0:
- random_pos = 0
- noise = x[..., random_pos:random_pos + noise_length]
- x = paddle.concat([x, noise], axis=-1)
- else:
- x = x[..., :mel_size]
-
- return x.unsqueeze(1)
-
-## 2. PitchShift
-class PitchShift(nn.Layer):
- def __init__(self, shift):
- super(PitchShift, self).__init__()
- self.shift = shift
-
- def forward(self, x):
- if len(x.shape) == 2:
- x = x.unsqueeze(0)
- x = x.squeeze()
- mel_size = x.shape[1]
- shift_scale = (mel_size + self.shift) / mel_size
- x = F.interpolate(x.unsqueeze(1), scale_factor=(shift_scale, 1.), align_corners=False,
- mode='bilinear').squeeze(1)
-
- x = x[:, :mel_size]
- if x.shape[1] < mel_size:
- pad_size = mel_size - x.shape[1]
-            x = paddle.concat([x, paddle.zeros([x.shape[0], pad_size, x.shape[2]])], axis=1)
- x = x.squeeze()
- return x.unsqueeze(1)
-
-## 3. ShiftBias
-class ShiftBias(nn.Layer):
- def __init__(self, bias):
- super(ShiftBias, self).__init__()
- self.bias = bias
-
- def forward(self, x):
- return x + self.bias
-
-## 4. Scaling
-class SpectScaling(nn.Layer):
- def __init__(self, scale):
- super(SpectScaling, self).__init__()
- self.scale = scale
-
- def forward(self, x):
- return x * self.scale
-
-## 5. Time Flip
-class TimeFlip(nn.Layer):
- def __init__(self, length):
- super(TimeFlip, self).__init__()
- self.length = round(length)
-
- def forward(self, x):
- if self.length > 1:
- start = np.random.randint(0, x.shape[-1] - self.length)
- x_ret = x.clone()
- x_ret[..., start:start + self.length] = paddle.flip(x[..., start:start + self.length], axis=[-1])
- x = x_ret
- return x
-
-class PhaseShuffle2D(nn.Layer):
- def __init__(self, n=2):
- super(PhaseShuffle2D, self).__init__()
- self.n = n
- self.random = random.Random(1)
-
- def forward(self, x, move=None):
- # x.size = (B, C, M, L)
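-        # Circularly shift the input along the last (time) axis by a random offset in [-n, n].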
- if move is None:
- move = self.random.randint(-self.n, self.n)
-
- if move == 0:
- return x
- else:
- left = x[:, :, :, :move]
- right = x[:, :, :, move:]
- shuffled = paddle.concat([right, left], axis=3)
-
- return shuffled
-
-def build_transforms():
- transforms = [
- lambda M: TimeStrech(1+ (np.random.random()-0.5)*M*0.2),
- lambda M: SpectScaling(1 + (np.random.random()-1)*M*0.1),
- lambda M: PhaseShuffle2D(192),
- ]
- N, M = len(transforms), np.random.random()
- composed = nn.Sequential(
- *[trans(M) for trans in np.random.choice(transforms, N)]
- )
- return composed
diff --git a/spaces/Hoodady/3DFuse/run_nerf.py b/spaces/Hoodady/3DFuse/run_nerf.py
deleted file mode 100644
index a66ed3c600ff43614c8dab4127e28f928a580dc8..0000000000000000000000000000000000000000
--- a/spaces/Hoodady/3DFuse/run_nerf.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from typing import List
-from pydantic import validator
-
-from my.config import BaseConf, SingleOrList, dispatch
-from my.utils.seed import seed_everything
-
-import numpy as np
-from voxnerf.vox import VOXRF_REGISTRY
-from voxnerf.pipelines import train
-
-
-class VoxConfig(BaseConf):
- model_type: str = "VoxRF"
- bbox_len: float = 1.5
- grid_size: SingleOrList(int) = [128, 128, 128]
- step_ratio: float = 0.5
- density_shift: float = -10.
- ray_march_weight_thres: float = 0.0001
- c: int = 3
- blend_bg_texture: bool = False
- bg_texture_hw: int = 64
-
- @validator("grid_size")
- def check_gsize(cls, grid_size):
- if isinstance(grid_size, int):
- return [grid_size, ] * 3
- else:
- assert len(grid_size) == 3
- return grid_size
-
- def make(self):
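-        # Look up the registered voxel model class and instantiate it; bbox_len becomes a
-        # symmetric axis-aligned bounding box around the origin.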
- params = self.dict()
- m_type = params.pop("model_type")
- model_fn = VOXRF_REGISTRY.get(m_type)
-
- radius = params.pop('bbox_len')
- aabb = radius * np.array([
- [-1, -1, -1],
- [1, 1, 1]
- ])
- model = model_fn(aabb=aabb, **params)
- return model
-
-
-class TrainerConfig(BaseConf):
- model: VoxConfig = VoxConfig()
- scene: str = "lego"
- n_epoch: int = 2
- bs: int = 4096
- lr: float = 0.02
-
- def run(self):
- args = self.dict()
- args.pop("model")
-
- model = self.model.make()
- train(model, **args)
-
-
-if __name__ == "__main__":
- seed_everything(0)
- dispatch(TrainerConfig)
diff --git a/spaces/ICML2022/PointCloudC/util/data_util.py b/spaces/ICML2022/PointCloudC/util/data_util.py
deleted file mode 100644
index 24734cfc8419ed3f0db313d910854e259f09b1d0..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/PointCloudC/util/data_util.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import glob
-import h5py
-import numpy as np
-from torch.utils.data import Dataset
-import os
-import json
-from PointWOLF import PointWOLF
-
-
-def load_data(partition):
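-    # Load every ModelNet40 HDF5 shard for the requested split and concatenate points and labels.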
- all_data = []
- all_label = []
- for h5_name in glob.glob('./data/modelnet40_ply_hdf5_2048/ply_data_%s*.h5' % partition):
-        f = h5py.File(h5_name, 'r')  # open read-only; older h5py versions default to append mode
- data = f['data'][:].astype('float32')
- label = f['label'][:].astype('int64')
- f.close()
- all_data.append(data)
- all_label.append(label)
- all_data = np.concatenate(all_data, axis=0)
- all_label = np.concatenate(all_label, axis=0)
- return all_data, all_label
-
-
-def pc_normalize(pc):
- centroid = np.mean(pc, axis=0)
- pc = pc - centroid
- m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
- pc = pc / m
- return pc
-
-
-def translate_pointcloud(pointcloud):
- xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
- xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
-
- translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
- return translated_pointcloud
-
-
-def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
- N, C = pointcloud.shape
- pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
- return pointcloud
-
-
-# =========== ModelNet40 =================
-class ModelNet40(Dataset):
- def __init__(self, num_points, partition='train', args=None):
- self.data, self.label = load_data(partition)
- self.num_points = num_points
- self.partition = partition
- self.PointWOLF = PointWOLF(args) if args is not None else None
-
-
- def __getitem__(self, item):
- pointcloud = self.data[item][:self.num_points]
- label = self.label[item]
- if self.partition == 'train':
- np.random.shuffle(pointcloud)
- if self.PointWOLF is not None:
- _, pointcloud = self.PointWOLF(pointcloud)
- return pointcloud, label
-
- def __len__(self):
- return self.data.shape[0]
-
-# =========== ShapeNet Part =================
-class PartNormalDataset(Dataset):
- def __init__(self, npoints=2500, split='train', normalize=False):
- self.npoints = npoints
- self.root = './data/shapenetcore_partanno_segmentation_benchmark_v0_normal'
- self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
- self.cat = {}
- self.normalize = normalize
-
- with open(self.catfile, 'r') as f:
- for line in f:
- ls = line.strip().split()
- self.cat[ls[0]] = ls[1]
- self.cat = {k: v for k, v in self.cat.items()}
-
- self.meta = {}
- with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
- train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
- with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
- val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
- with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
- test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
- for item in self.cat:
- self.meta[item] = []
- dir_point = os.path.join(self.root, self.cat[item])
- fns = sorted(os.listdir(dir_point))
-
- if split == 'trainval':
- fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
- elif split == 'train':
- fns = [fn for fn in fns if fn[0:-4] in train_ids]
- elif split == 'val':
- fns = [fn for fn in fns if fn[0:-4] in val_ids]
- elif split == 'test':
- fns = [fn for fn in fns if fn[0:-4] in test_ids]
- else:
- print('Unknown split: %s. Exiting..' % (split))
- exit(-1)
-
- for fn in fns:
- token = (os.path.splitext(os.path.basename(fn))[0])
- self.meta[item].append(os.path.join(dir_point, token + '.txt'))
-
- self.datapath = []
- for item in self.cat:
- for fn in self.meta[item]:
- self.datapath.append((item, fn))
-
- self.classes = dict(zip(self.cat, range(len(self.cat))))
- # Mapping from category ('Chair') to a list of int [10,11,12,13] as segmentation labels
- self.seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
- 'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46],
- 'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27],
- 'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],
- 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
-
- self.cache = {} # from index to (point_set, cls, seg) tuple
- self.cache_size = 20000
-
- def __getitem__(self, index):
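-        # Serve cached samples when possible; otherwise parse the point-cloud text file into
-        # xyz coordinates, normals and per-point segmentation labels, then cache it.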
- if index in self.cache:
- point_set, normal, seg, cls = self.cache[index]
- else:
- fn = self.datapath[index]
- cat = self.datapath[index][0]
- cls = self.classes[cat]
- cls = np.array([cls]).astype(np.int32)
- data = np.loadtxt(fn[1]).astype(np.float32)
- point_set = data[:, 0:3]
- normal = data[:, 3:6]
- seg = data[:, -1].astype(np.int32)
- if len(self.cache) < self.cache_size:
- self.cache[index] = (point_set, normal, seg, cls)
-
- if self.normalize:
- point_set = pc_normalize(point_set)
-
- choice = np.random.choice(len(seg), self.npoints, replace=True)
-
- # resample
-        # note that some point clouds contain fewer than 2048 points, hence random.choice with replacement
-        # remember to use the same seed during train and test to get a stable result
- point_set = point_set[choice, :]
- seg = seg[choice]
- normal = normal[choice, :]
-
- return point_set, cls, seg, normal
-
- def __len__(self):
- return len(self.datapath)
-
-
-if __name__ == '__main__':
- train = ModelNet40(1024)
- test = ModelNet40(1024, 'test')
- for data, label in train:
- print(data.shape)
- print(label.shape)
diff --git a/spaces/ImagineAI-Real/MidJourney-Diffusion/README.md b/spaces/ImagineAI-Real/MidJourney-Diffusion/README.md
deleted file mode 100644
index 94eef97542b21abe2fc5dd553164580517670a99..0000000000000000000000000000000000000000
--- a/spaces/ImagineAI-Real/MidJourney-Diffusion/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: MidJourney Diffusion
-emoji: 🌖
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.28.3
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/training/modules/__init__.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/training/modules/__init__.py
deleted file mode 100644
index 82e1a9096a5bd8f3fb00e899d0239b078246cad4..0000000000000000000000000000000000000000
--- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/training/modules/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import logging
-
-from saicinpainting.training.modules.ffc import FFCResNetGenerator
-from saicinpainting.training.modules.pix2pixhd import GlobalGenerator, MultiDilatedGlobalGenerator, \
- NLayerDiscriminator, MultidilatedNLayerDiscriminator
-
-def make_generator(config, kind, **kwargs):
- logging.info(f'Make generator {kind}')
-
- if kind == 'pix2pixhd_multidilated':
- return MultiDilatedGlobalGenerator(**kwargs)
-
- if kind == 'pix2pixhd_global':
- return GlobalGenerator(**kwargs)
-
- if kind == 'ffc_resnet':
- return FFCResNetGenerator(**kwargs)
-
- raise ValueError(f'Unknown generator kind {kind}')
-
-
-def make_discriminator(kind, **kwargs):
- logging.info(f'Make discriminator {kind}')
-
- if kind == 'pix2pixhd_nlayer_multidilated':
- return MultidilatedNLayerDiscriminator(**kwargs)
-
- if kind == 'pix2pixhd_nlayer':
- return NLayerDiscriminator(**kwargs)
-
- raise ValueError(f'Unknown discriminator kind {kind}')
diff --git a/spaces/Intae/deepfake/training/tools/utils.py b/spaces/Intae/deepfake/training/tools/utils.py
deleted file mode 100644
index 9b22dd6910b47bf7e2e01acbd95ee2dcf61d44b7..0000000000000000000000000000000000000000
--- a/spaces/Intae/deepfake/training/tools/utils.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import cv2
-from apex.optimizers import FusedAdam, FusedSGD
-from timm.optim import AdamW
-from torch import optim
-from torch.optim import lr_scheduler
-from torch.optim.rmsprop import RMSprop
-from torch.optim.adamw import AdamW
-from torch.optim.lr_scheduler import MultiStepLR, CyclicLR
-
-from training.tools.schedulers import ExponentialLRScheduler, PolyLR, LRStepScheduler
-
-cv2.ocl.setUseOpenCL(False)
-cv2.setNumThreads(0)
-
-
-class AverageMeter(object):
- """Computes and stores the average and current value"""
-
- def __init__(self):
- self.reset()
-
- def reset(self):
- self.val = 0
- self.avg = 0
- self.sum = 0
- self.count = 0
-
- def update(self, val, n=1):
- self.val = val
- self.sum += val * n
- self.count += n
- self.avg = self.sum / self.count
-
-def create_optimizer(optimizer_config, model, master_params=None):
- """Creates optimizer and schedule from configuration
-
- Parameters
- ----------
- optimizer_config : dict
- Dictionary containing the configuration options for the optimizer.
- model : Model
- The network model.
-
- Returns
- -------
- optimizer : Optimizer
- The optimizer.
- scheduler : LRScheduler
- The learning rate scheduler.
- """
- if optimizer_config.get("classifier_lr", -1) != -1:
- # Separate classifier parameters from all others
- net_params = []
- classifier_params = []
- for k, v in model.named_parameters():
- if not v.requires_grad:
- continue
- if k.find("encoder") != -1:
- net_params.append(v)
- else:
- classifier_params.append(v)
- params = [
- {"params": net_params},
- {"params": classifier_params, "lr": optimizer_config["classifier_lr"]},
- ]
- else:
- if master_params:
- params = master_params
- else:
- params = model.parameters()
-
- if optimizer_config["type"] == "SGD":
- optimizer = optim.SGD(params,
- lr=optimizer_config["learning_rate"],
- momentum=optimizer_config["momentum"],
- weight_decay=optimizer_config["weight_decay"],
- nesterov=optimizer_config["nesterov"])
- elif optimizer_config["type"] == "FusedSGD":
- optimizer = FusedSGD(params,
- lr=optimizer_config["learning_rate"],
- momentum=optimizer_config["momentum"],
- weight_decay=optimizer_config["weight_decay"],
- nesterov=optimizer_config["nesterov"])
- elif optimizer_config["type"] == "Adam":
- optimizer = optim.Adam(params,
- lr=optimizer_config["learning_rate"],
- weight_decay=optimizer_config["weight_decay"])
- elif optimizer_config["type"] == "FusedAdam":
- optimizer = FusedAdam(params,
- lr=optimizer_config["learning_rate"],
- weight_decay=optimizer_config["weight_decay"])
- elif optimizer_config["type"] == "AdamW":
- optimizer = AdamW(params,
- lr=optimizer_config["learning_rate"],
- weight_decay=optimizer_config["weight_decay"])
- elif optimizer_config["type"] == "RmsProp":
- optimizer = RMSprop(params,
- lr=optimizer_config["learning_rate"],
- weight_decay=optimizer_config["weight_decay"])
- else:
- raise KeyError("unrecognized optimizer {}".format(optimizer_config["type"]))
-
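-    # Build the learning-rate scheduler named in optimizer_config["schedule"]["type"];
-    # the "linear" schedule computes its factor per iteration as alpha * it + beta.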
- if optimizer_config["schedule"]["type"] == "step":
- scheduler = LRStepScheduler(optimizer, **optimizer_config["schedule"]["params"])
- elif optimizer_config["schedule"]["type"] == "clr":
- scheduler = CyclicLR(optimizer, **optimizer_config["schedule"]["params"])
- elif optimizer_config["schedule"]["type"] == "multistep":
- scheduler = MultiStepLR(optimizer, **optimizer_config["schedule"]["params"])
- elif optimizer_config["schedule"]["type"] == "exponential":
- scheduler = ExponentialLRScheduler(optimizer, **optimizer_config["schedule"]["params"])
- elif optimizer_config["schedule"]["type"] == "poly":
- scheduler = PolyLR(optimizer, **optimizer_config["schedule"]["params"])
- elif optimizer_config["schedule"]["type"] == "constant":
- scheduler = lr_scheduler.LambdaLR(optimizer, lambda epoch: 1.0)
- elif optimizer_config["schedule"]["type"] == "linear":
- def linear_lr(it):
- return it * optimizer_config["schedule"]["params"]["alpha"] + optimizer_config["schedule"]["params"]["beta"]
-
- scheduler = lr_scheduler.LambdaLR(optimizer, linear_lr)
-
- return optimizer, scheduler
diff --git a/spaces/IvaElen/nlp_proj/pages/GPT.py b/spaces/IvaElen/nlp_proj/pages/GPT.py
deleted file mode 100644
index da0b637b2dc0c78aa0ec6b68b03795fb2dd5a786..0000000000000000000000000000000000000000
--- a/spaces/IvaElen/nlp_proj/pages/GPT.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import streamlit as st
-import torch
-import numpy as np
-import transformers
-import random
-import textwrap
-
-@st.cache_data
-def load_model():
- model_finetuned = transformers.AutoModelWithLMHead.from_pretrained(
- 'tinkoff-ai/ruDialoGPT-small',
- output_attentions = False,
- output_hidden_states = False
- )
- model_finetuned.load_state_dict(torch.load('GPT_sonnik_only.pt', map_location=torch.device('cpu')))
- tokenizer = transformers.AutoTokenizer.from_pretrained('tinkoff-ai/ruDialoGPT-small')
- return model_finetuned, tokenizer
-
-def preprocess_text(text_input, tokenizer):
- prompt = tokenizer.encode(text_input, return_tensors='pt')
- return prompt
-
-def predict_sentiment(model, prompt, temp, num_generate):
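-    # Despite its name, this samples `num_generate` continuations from the fine-tuned dialogue
-    # model, combining beam search with top-k/top-p sampling at the chosen temperature.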
- print('1')
- with torch.inference_mode():
- print('2')
- result = model.generate(
- input_ids=prompt,
- max_length=100,
- num_beams=5,
- do_sample=True,
- temperature=float(temp),
- top_k=50,
- top_p=0.6,
- no_repeat_ngram_size=3,
- num_return_sequences=num_generate,
- ).cpu().numpy()
- print(result)
- return result
-
-st.title('Text generation with dreambook')
-
-model, tokenizer = load_model()
-
-text_input = st.text_input("Enter some text about movie")
-max_len = st.slider('Length of sequence', 0, 100, 50)
-temp = st.slider('Temperature', 1, 30, 1)
-num_generate = st.text_input("Enter number of sequences")
-
-if st.button('Generate'):
- print('uirhf')
- prompt = preprocess_text(text_input, tokenizer)
- print('uirhf')
- result = predict_sentiment(model, prompt, temp, int(num_generate))
- print('uirhf')
- for i in result:
- st.write(textwrap.fill(tokenizer.decode(i), max_len))
\ No newline at end of file
diff --git a/spaces/Ivanrs/image-matching-sift-orb/app.py b/spaces/Ivanrs/image-matching-sift-orb/app.py
deleted file mode 100644
index 14c8339ff647b06f7b1bf41dc49941c791e32663..0000000000000000000000000000000000000000
--- a/spaces/Ivanrs/image-matching-sift-orb/app.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import gradio as gr
-import cv2
-
-
-def sift(img1, img2):
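-    # Detect SIFT keypoints and descriptors in both images, brute-force match them with an L1
-    # norm and cross-checking, and draw the 50 closest matches side by side.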
- sift = cv2.SIFT_create()
-
- keypoints_1, descriptors_1 = sift.detectAndCompute(img1,None)
- keypoints_2, descriptors_2 = sift.detectAndCompute(img2,None)
-
- bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)
-
- matches = bf.match(descriptors_1,descriptors_2)
- matches = sorted(matches, key = lambda x:x.distance)
-
- img3 = cv2.drawMatches(img1, keypoints_1, img2, keypoints_2, matches[:50], img2, flags=2)
- return img3
-
-def orb(img1, img2):
- orb = cv2.ORB_create()
-
- keypoints_1, descriptors_1 = orb.detectAndCompute(img1,None)
- keypoints_2, descriptors_2 = orb.detectAndCompute(img2,None)
-
- bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)
-
- matches = bf.match(descriptors_1,descriptors_2)
- matches = sorted(matches, key = lambda x:x.distance)
-
- img3 = cv2.drawMatches(img1, keypoints_1, img2, keypoints_2, matches[:50], img2, flags=2)
- return img3
-
-def match(img1, img2):
-
- img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
- img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
-
- sift_res = sift(img1, img2)
- orb_res = orb(img1, img2)
-
- return [sift_res, orb_res]
-
-
-interface = gr.Interface(
- title = "SIFT and ORB Image Matching 🖼 👉 🖼",
- description = "Scale Invariant Feature Transform (SIFT) & Oriented FAST and Rotated BRIEF (ORB)
Select training and query images 🖼",
- article='~ Ivanrs',
- allow_flagging = "never",
- fn = match,
- inputs = [
- gr.Image(label = "Train Image", shape = [300, 200]),
- gr.Image(label = "Query Image", shape = [300, 200]),
- ],
- outputs = [
- gr.Image(label = "SIFT Output"),
- gr.Image(label = "ORB Output"),
- ],
- examples = [
- ["img1.jpg", "img2.jpg"],
- ["img3.jpg", "img4.jpg"],
- ["img5.jpg", "img6.png"],
- ["img7.jpeg", "img8.jpeg"]
- ]
-)
-
-interface.launch(share = False)
\ No newline at end of file
diff --git a/spaces/Jamel887/Rvc-tio887/lib/infer_pack/models.py b/spaces/Jamel887/Rvc-tio887/lib/infer_pack/models.py
deleted file mode 100644
index 44c08d361bcb13b84b38dc29beff5cdaddad4ea2..0000000000000000000000000000000000000000
--- a/spaces/Jamel887/Rvc-tio887/lib/infer_pack/models.py
+++ /dev/null
@@ -1,1124 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from lib.infer_pack import modules
-from lib.infer_pack import attentions
-from lib.infer_pack import commons
-from lib.infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from lib.infer_pack.commons import init_weights
-import numpy as np
-from lib.infer_pack import commons
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
-        if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder768(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(768, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
-        if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
-    sine_amp: amplitude of sine waveform (default 0.1)
-    noise_std: std of Gaussian noise (default 0.003)
-    voiced_threshold: F0 threshold for U/V classification (default 0)
-    flag_for_pulse: this SineGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
-            rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the per-harmonic products cannot be folded into later post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
-            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # taking % 1 here would mean the later cumsum can no longer be optimized
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(
- 2, 1
- ) #######
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonic above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that amplitude of noise in unvoiced is decided
- by sine_amp
-        voiced_threshold: threshold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
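-        # Build a harmonic excitation signal from F0, then progressively upsample the latent
-        # features, injecting a matching-rate projection of the excitation at every stage.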
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMs256NSFsid(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
-        if isinstance(sr, str):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
-    ):  # here ds is the speaker id, shape [bs, 1]
- # print(1,pitch.shape)#[bs,t]
-        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- # print(-2,pitchf.shape,z_slice.shape)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
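-        # Inference path: sample the prior (noise scaled by 0.66666), run the flow in reverse and
-        # decode with the NSF generator conditioned on the speaker embedding and F0.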
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs768NSFsid(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
-        if isinstance(sr, str):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
-    ):  # here ds is the speaker id, shape [bs, 1]
- # print(1,pitch.shape)#[bs,t]
-        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- # print(-2,pitchf.shape,z_slice.shape)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs256NSFsid_nono(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr=None,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=False,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
-    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # here ds is the speaker id, shape [bs, 1]
-        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs768NSFsid_nono(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr=None,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=False,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
-    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # here ds is the speaker id, shape [bs, 1]
-        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class MultiPeriodDiscriminatorV2(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminatorV2, self).__init__()
- # periods = [2, 3, 5, 7, 11, 17]
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
diff --git a/spaces/JeffJing/ZookChatBot/steamship/utils/metadata.py b/spaces/JeffJing/ZookChatBot/steamship/utils/metadata.py
deleted file mode 100644
index fc4f669233a4cc4e913b193af377625794e33bed..0000000000000000000000000000000000000000
--- a/spaces/JeffJing/ZookChatBot/steamship/utils/metadata.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import hashlib
-import json
-from typing import Dict, List, Optional, Union
-
-Metadata = Union[int, float, bool, str, List, Dict]
-
-
-def str_to_metadata(s: str) -> Optional[Metadata]:
- if s is None:
- return None
- return json.loads(s)
-
-
-def metadata_to_str(m: Metadata) -> Optional[str]:
- if m is None:
- return None
- return json.dumps(m)
-
-
-def hash_dict(d: Dict) -> str:
- """Returns the MD5 hash of a dictionary."""
- dhash = hashlib.md5() # noqa: S303
-    # Sort keys so that the string representation is always the same.
- encoded = json.dumps(d, sort_keys=True).encode()
- dhash.update(encoded)
- return dhash.hexdigest()
diff --git a/spaces/JeffJing/ZookChatBot/tls_client/__version__.py b/spaces/JeffJing/ZookChatBot/tls_client/__version__.py
deleted file mode 100644
index ad8733c7eacb2f117a53318bfbdcccc736b20042..0000000000000000000000000000000000000000
--- a/spaces/JeffJing/ZookChatBot/tls_client/__version__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# _____ __ __ ___ _ _ _
-# /__ \/ / / _\ / __\ (_) ___ _ __ | |_
-# / /\/ / \ \ _____ / / | | |/ _ \ '_ \| __|
-# / / / /____\ \_____/ /___| | | __/ | | | |_
-# \/ \____/\__/ \____/|_|_|\___|_| |_|\__|
-
-__title__ = "tls_client"
-__description__ = "Advanced Python HTTP Client."
-__version__ = "0.1.8"
-__author__ = "Florian Zager"
-__license__ = "MIT"
\ No newline at end of file
diff --git a/spaces/Kaludi/ChatGPT-BingChat-GPT3-Prompt-Generator_App/app.py b/spaces/Kaludi/ChatGPT-BingChat-GPT3-Prompt-Generator_App/app.py
deleted file mode 100644
index c68cb07bfd156c09e0a92afcbe9769100c0596c9..0000000000000000000000000000000000000000
--- a/spaces/Kaludi/ChatGPT-BingChat-GPT3-Prompt-Generator_App/app.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import streamlit as st
-import random
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-
-tokenizer = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
-model = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
-
-def generate(prompt, max_new_tokens):
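-    # Tokenize the role description and let the seq2seq model write a full ChatGPT-style prompt.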
- batch = tokenizer(prompt, return_tensors="pt")
- generated_ids = model.generate(batch["input_ids"], max_new_tokens=max_new_tokens)
- output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
- return output[0]
-
-st.title("ChatGPT-BingChat Prompt Generator")
-st.write("This app generates ChatGPT/BingChat & GPT-3 prompts using [this](https://huggingface.co/Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum) model trained by [Kaludi](https://huggingface.co/Kaludi/). Enter a role and a prompt will be generated based on it.")
-prompt = st.text_input("Enter a Role, Example: Virtual Assistant", placeholder="Text here", value="")
-max_new_tokens = st.slider("Select Max Tokens in Response", min_value=100, max_value=500, value=150, step=10)
-if st.button("Generate"):
- output = generate(prompt, max_new_tokens)
- st.write("Generated Prompt:", box=True)
- st.write("{}
".format(output), unsafe_allow_html=True)
-st.write("")
-st.write("Examples:
",unsafe_allow_html=True, box=True)
-st.write("", unsafe_allow_html=True)
-with open("examples.txt", "r") as f:
- examples = f.readlines()
- random_examples = random.sample(examples, 5)
- for example in random_examples:
- example = example.strip()
- st.write("• {}
".format(example), unsafe_allow_html=True)
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/diffq/utils.py b/spaces/Kangarroar/ApplioRVC-Inference/diffq/utils.py
deleted file mode 100644
index be6ab5253c38564140bc202077292bb99f9f397b..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/diffq/utils.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import inspect
-from typing import Optional, List
-
-
-def simple_repr(obj, attrs: Optional[List[str]] = None, overrides={}):
- """
- Return a simple representation string for `obj`.
- If `attrs` is not None, it should be a list of attributes to include.
- """
- params = inspect.signature(obj.__class__).parameters
- attrs_repr = []
- if attrs is None:
- attrs = params.keys()
- for attr in attrs:
- display = False
- if attr in overrides:
- value = overrides[attr]
- elif hasattr(obj, attr):
- value = getattr(obj, attr)
- else:
- continue
- if attr in params:
- param = params[attr]
- if param.default is inspect._empty or value != param.default:
- display = True
- else:
- display = True
-
- if display:
- attrs_repr.append(f"{attr}={value}")
- return f"{obj.__class__.__name__}({','.join(attrs_repr)})"
diff --git a/spaces/KenjieDec/RemBG/rembg/commands/__init__.py b/spaces/KenjieDec/RemBG/rembg/commands/__init__.py
deleted file mode 100644
index 64f8993e9b710c7150d16ee4361fc0d406d72f55..0000000000000000000000000000000000000000
--- a/spaces/KenjieDec/RemBG/rembg/commands/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from importlib import import_module
-from pathlib import Path
-from pkgutil import iter_modules
-
-command_functions = []
-
-package_dir = Path(__file__).resolve().parent
-for _b, module_name, _p in iter_modules([str(package_dir)]):
- module = import_module(f"{__name__}.{module_name}")
- for attribute_name in dir(module):
- attribute = getattr(module, attribute_name)
- if attribute_name.endswith("_command"):
- command_functions.append(attribute)
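The `__init__` above implements a small plugin-discovery convention: every sibling module is imported and any attribute whose name ends in `_command` is collected. A self-contained sketch of the same convention using an in-memory stand-in module (names here are made up for illustration):

```python
import types

def collect_commands(module) -> list:
    """Gather callables whose names end in `_command` (same convention as above)."""
    return [getattr(module, name) for name in dir(module) if name.endswith("_command")]

fake_module = types.ModuleType("fake_commands")       # stand-in for a sibling module
fake_module.remove_command = lambda: print("remove called")
fake_module.helper = lambda: None                      # ignored: no `_command` suffix

for fn in collect_commands(fake_module):
    fn()                                               # -> "remove called"
```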
diff --git a/spaces/Kevin676/AutoGPT/autogpt/speech/base.py b/spaces/Kevin676/AutoGPT/autogpt/speech/base.py
deleted file mode 100644
index d74fa51be75b5078134c510b393a06deb0267b2a..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/AutoGPT/autogpt/speech/base.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""Base class for all voice classes."""
-import abc
-from threading import Lock
-
-from autogpt.config import AbstractSingleton
-
-
-class VoiceBase(AbstractSingleton):
- """
- Base class for all voice classes.
- """
-
- def __init__(self):
- """
- Initialize the voice class.
- """
- self._url = None
- self._headers = None
- self._api_key = None
- self._voices = []
- self._mutex = Lock()
- self._setup()
-
- def say(self, text: str, voice_index: int = 0) -> bool:
- """
- Say the given text.
-
- Args:
- text (str): The text to say.
- voice_index (int): The index of the voice to use.
- """
- with self._mutex:
- return self._speech(text, voice_index)
-
- @abc.abstractmethod
- def _setup(self) -> None:
- """
- Setup the voices, API key, etc.
- """
- pass
-
- @abc.abstractmethod
- def _speech(self, text: str, voice_index: int = 0) -> bool:
- """
- Play the given text.
-
- Args:
- text (str): The text to play.
- """
- pass
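`VoiceBase` above leaves `_setup` and `_speech` abstract and serializes calls through a mutex in `say`. A hedged sketch of a concrete subclass; `AbstractSingleton` is replaced by a plain `abc.ABC` stand-in so the snippet runs without the autogpt package:

```python
import abc
from threading import Lock

class VoiceBase(abc.ABC):
    """Stand-in for the original base (which derives from AbstractSingleton)."""

    def __init__(self):
        self._mutex = Lock()
        self._setup()

    def say(self, text: str, voice_index: int = 0) -> bool:
        with self._mutex:  # one utterance at a time
            return self._speech(text, voice_index)

    @abc.abstractmethod
    def _setup(self) -> None: ...

    @abc.abstractmethod
    def _speech(self, text: str, voice_index: int = 0) -> bool: ...

class PrintVoice(VoiceBase):
    """Toy backend that 'speaks' by printing."""

    def _setup(self) -> None:
        self._voices = ["default"]

    def _speech(self, text: str, voice_index: int = 0) -> bool:
        print(f"[{self._voices[voice_index]}] {text}")
        return True

PrintVoice().say("hello")
```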
diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/data_objects/utterance.py b/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/data_objects/utterance.py
deleted file mode 100644
index 0768c3420f422a7464f305b4c1fb6752c57ceda7..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/data_objects/utterance.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import numpy as np
-
-
-class Utterance:
- def __init__(self, frames_fpath, wave_fpath):
- self.frames_fpath = frames_fpath
- self.wave_fpath = wave_fpath
-
- def get_frames(self):
- return np.load(self.frames_fpath)
-
- def random_partial(self, n_frames):
- """
- Crops the frames into a partial utterance of n_frames
-
- :param n_frames: The number of frames of the partial utterance
- :return: the partial utterance frames and a tuple indicating the start and end of the
- partial utterance in the complete utterance.
- """
- frames = self.get_frames()
- if frames.shape[0] == n_frames:
- start = 0
- else:
- start = np.random.randint(0, frames.shape[0] - n_frames)
- end = start + n_frames
- return frames[start:end], (start, end)
\ No newline at end of file
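`random_partial` above crops a random fixed-length window out of the saved frames. The same windowing logic, sketched with an in-memory array instead of a `.npy` file (shapes are illustrative):

```python
import numpy as np

def random_partial(frames: np.ndarray, n_frames: int):
    """Random contiguous crop of `n_frames` rows plus its (start, end) range."""
    if frames.shape[0] == n_frames:
        start = 0
    else:
        start = np.random.randint(0, frames.shape[0] - n_frames)
    end = start + n_frames
    return frames[start:end], (start, end)

frames = np.random.rand(300, 40)             # e.g. 300 frames of 40 mel channels
partial, (start, end) = random_partial(frames, 160)
print(partial.shape, (start, end))           # (160, 40) and a random window
```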
diff --git a/spaces/Kirihasan/rvc-jjjo/infer_pack/modules.py b/spaces/Kirihasan/rvc-jjjo/infer_pack/modules.py
deleted file mode 100644
index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000
--- a/spaces/Kirihasan/rvc-jjjo/infer_pack/modules.py
+++ /dev/null
@@ -1,522 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-from infer_pack import commons
-from infer_pack.commons import init_weights, get_padding
-from infer_pack.transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(
- self,
- in_channels,
- hidden_channels,
- out_channels,
- kernel_size,
- n_layers,
- p_dropout,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
- assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(
- nn.Conv1d(
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
- for _ in range(n_layers - 1):
- self.conv_layers.append(
- nn.Conv1d(
- hidden_channels,
- hidden_channels,
- kernel_size,
- padding=kernel_size // 2,
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
- Dilated and Depth-Separable Convolution
- """
-
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size**i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(
- nn.Conv1d(
- channels,
- channels,
- kernel_size,
- groups=channels,
- dilation=dilation,
- padding=padding,
- )
- )
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(
- self,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- p_dropout=0,
- ):
- super(WN, self).__init__()
- assert kernel_size % 2 == 1
- self.hidden_channels = hidden_channels
- self.kernel_size = (kernel_size,)
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(
- gin_channels, 2 * hidden_channels * n_layers, 1
- )
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
-
- for i in range(n_layers):
- dilation = dilation_rate**i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(
- hidden_channels,
- 2 * hidden_channels,
- kernel_size,
- dilation=dilation,
- padding=padding,
- )
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:, self.hidden_channels :, :]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2]),
- )
- ),
- ]
- )
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- ]
- )
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- ]
- )
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels, 1))
- self.logs = nn.Parameter(torch.zeros(channels, 1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1, 2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False,
- ):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=p_dropout,
- gin_channels=gin_channels,
- )
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1, 2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class ConvFlow(nn.Module):
- def __init__(
- self,
- in_channels,
- filter_channels,
- kernel_size,
- n_layers,
- num_bins=10,
- tail_bound=5.0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
- self.proj = nn.Conv1d(
- filter_channels, self.half_channels * (num_bins * 3 - 1), 1
- )
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
- unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
- self.filter_channels
- )
- unnormalized_derivatives = h[..., 2 * self.num_bins :]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(
- x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails="linear",
- tail_bound=self.tail_bound,
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
- if not reverse:
- return x, logdet
- else:
- return x
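`ResidualCouplingLayer` above is an affine coupling flow: half the channels pass through untouched, and the stats computed from them scale and shift the other half, which makes the transform exactly invertible. A small numeric sketch of that coupling math, with a dummy function standing in for the WaveNet (`WN`) encoder, so this illustrates the idea rather than the original module:

```python
import torch

def coupling(x, stats_net, reverse=False):
    """Affine coupling on the second half of the channels."""
    x0, x1 = torch.chunk(x, 2, dim=1)
    m, logs = stats_net(x0)               # stats depend only on the untouched half
    if not reverse:
        x1 = m + x1 * torch.exp(logs)     # forward: y1 = m + x1 * exp(logs)
    else:
        x1 = (x1 - m) * torch.exp(-logs)  # reverse: x1 = (y1 - m) * exp(-logs)
    return torch.cat([x0, x1], dim=1)

stats_net = lambda x0: (torch.tanh(x0), 0.1 * torch.tanh(x0))  # dummy stand-in for WN

x = torch.randn(2, 4, 8)                   # (batch, channels, time)
y = coupling(x, stats_net)
x_rec = coupling(y, stats_net, reverse=True)
print(torch.allclose(x, x_rec, atol=1e-6)) # True: the coupling is invertible
```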
diff --git a/spaces/LAYEK-143/TEXT-TO-IMAGE-AI/app.py b/spaces/LAYEK-143/TEXT-TO-IMAGE-AI/app.py
deleted file mode 100644
index a9c4b5863de810a28a6daff5f4d6aae94555136a..0000000000000000000000000000000000000000
--- a/spaces/LAYEK-143/TEXT-TO-IMAGE-AI/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/jbilcke-hf/sdxl-cinematic-2").launch()
\ No newline at end of file
diff --git a/spaces/Lbin123/Lbingo/src/lib/bots/bing/index.ts b/spaces/Lbin123/Lbingo/src/lib/bots/bing/index.ts
deleted file mode 100644
index 2c4afae01a345b8415935228566cb30d695e768d..0000000000000000000000000000000000000000
--- a/spaces/Lbin123/Lbingo/src/lib/bots/bing/index.ts
+++ /dev/null
@@ -1,421 +0,0 @@
-import { fetch, WebSocket, debug } from '@/lib/isomorphic'
-import WebSocketAsPromised from 'websocket-as-promised'
-import {
- SendMessageParams,
- BingConversationStyle,
- ConversationResponse,
- ChatResponseMessage,
- ConversationInfo,
- InvocationEventType,
- ChatError,
- ErrorCode,
- ChatUpdateCompleteResponse,
- ImageInfo,
- KBlobResponse
-} from './types'
-
-import { convertMessageToMarkdown, websocketUtils, streamAsyncIterable } from './utils'
-import { WatchDog, createChunkDecoder } from '@/lib/utils'
-
-type Params = SendMessageParams<{ bingConversationStyle: BingConversationStyle }>
-
-const OPTIONS_SETS = [
- 'nlu_direct_response_filter',
- 'deepleo',
- 'disable_emoji_spoken_text',
- 'responsible_ai_policy_235',
- 'enablemm',
- 'iycapbing',
- 'iyxapbing',
- 'objopinion',
- 'rweasgv2',
- 'dagslnv1',
- 'dv3sugg',
- 'autosave',
- 'iyoloxap',
- 'iyoloneutral',
- 'clgalileo',
- 'gencontentv3',
-]
-
-export class BingWebBot {
- protected conversationContext?: ConversationInfo
- protected cookie: string
- protected ua: string
- protected endpoint = ''
- private lastText = ''
- private asyncTasks: Array<Promise<any>> = []
-
- constructor(opts: {
- cookie: string
- ua: string
- bingConversationStyle?: BingConversationStyle
- conversationContext?: ConversationInfo
- }) {
- const { cookie, ua, conversationContext } = opts
- this.cookie = cookie?.includes(';') ? cookie : `_EDGE_V=1; _U=${cookie}`
- this.ua = ua
- this.conversationContext = conversationContext
- }
-
- static buildChatRequest(conversation: ConversationInfo) {
- const optionsSets = OPTIONS_SETS
- if (conversation.conversationStyle === BingConversationStyle.Precise) {
- optionsSets.push('h3precise')
- } else if (conversation.conversationStyle === BingConversationStyle.Creative) {
- optionsSets.push('h3imaginative')
- }
- return {
- arguments: [
- {
- source: 'cib',
- optionsSets,
- allowedMessageTypes: [
- 'Chat',
- 'InternalSearchQuery',
- 'Disengaged',
- 'InternalLoaderMessage',
- 'SemanticSerp',
- 'GenerateContentQuery',
- 'SearchQuery',
- ],
- sliceIds: [
- 'winmuid1tf',
- 'anssupfor_c',
- 'imgchatgptv2',
- 'tts2cf',
- 'contansperf',
- 'mlchatpc8500w',
- 'mlchatpc2',
- 'ctrlworkpay',
- 'winshortmsgtf',
- 'cibctrl',
- 'sydtransctrl',
- 'sydconfigoptc',
- '0705trt4',
- '517opinion',
- '628ajcopus0',
- '330uaugs0',
- '529rwea',
- '0626snptrcs0',
- '424dagslnv1',
- ],
- isStartOfSession: conversation.invocationId === 0,
- message: {
- author: 'user',
- inputMethod: 'Keyboard',
- text: conversation.prompt,
- imageUrl: conversation.imageUrl,
- messageType: 'Chat',
- },
- conversationId: conversation.conversationId,
- conversationSignature: conversation.conversationSignature,
- participant: { id: conversation.clientId },
- },
- ],
- invocationId: conversation.invocationId.toString(),
- target: 'chat',
- type: InvocationEventType.StreamInvocation,
- }
- }
-
- async createConversation(): Promise<ConversationResponse> {
- const headers = {
- 'Accept-Encoding': 'gzip, deflate, br, zsdch',
- 'User-Agent': this.ua,
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- cookie: this.cookie,
- }
-
- let resp: ConversationResponse | undefined
- try {
- const response = await fetch(this.endpoint + '/api/create', { method: 'POST', headers, redirect: 'error', mode: 'cors', credentials: 'include' })
- if (response.status === 404) {
- throw new ChatError('Not Found', ErrorCode.NOTFOUND_ERROR)
- }
- resp = await response.json() as ConversationResponse
- } catch (err) {
- console.error('create conversation error', err)
- }
-
- if (!resp?.result) {
- throw new ChatError('Invalid response', ErrorCode.UNKOWN_ERROR)
- }
-
- const { value, message } = resp.result || {}
- if (value !== 'Success') {
- const errorMsg = `${value}: ${message}`
- if (value === 'UnauthorizedRequest') {
- throw new ChatError(errorMsg, ErrorCode.BING_UNAUTHORIZED)
- }
- if (value === 'Forbidden') {
- throw new ChatError(errorMsg, ErrorCode.BING_FORBIDDEN)
- }
- throw new ChatError(errorMsg, ErrorCode.UNKOWN_ERROR)
- }
- return resp
- }
-
- private async createContext(conversationStyle: BingConversationStyle) {
- if (!this.conversationContext) {
- const conversation = await this.createConversation()
- this.conversationContext = {
- conversationId: conversation.conversationId,
- conversationSignature: conversation.conversationSignature,
- clientId: conversation.clientId,
- invocationId: 0,
- conversationStyle,
- prompt: '',
- }
- }
- return this.conversationContext
- }
-
- async sendMessage(params: Params) {
- try {
- await this.createContext(params.options.bingConversationStyle)
- Object.assign(this.conversationContext!, { prompt: params.prompt, imageUrl: params.imageUrl })
- return this.sydneyProxy(params)
- } catch (error) {
- params.onEvent({
- type: 'ERROR',
- error: error instanceof ChatError ? error : new ChatError('Catch Error', ErrorCode.UNKOWN_ERROR),
- })
- }
- }
-
- private async sydneyProxy(params: Params) {
- const abortController = new AbortController()
- const response = await fetch(this.endpoint + '/api/sydney', {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- },
- signal: abortController.signal,
- body: JSON.stringify(this.conversationContext!)
- })
- if (response.status !== 200) {
- params.onEvent({
- type: 'ERROR',
- error: new ChatError(
- 'Unknown error',
- ErrorCode.UNKOWN_ERROR,
- ),
- })
- }
- params.signal?.addEventListener('abort', () => {
- abortController.abort()
- })
-
- const textDecoder = createChunkDecoder()
- for await (const chunk of streamAsyncIterable(response.body!)) {
- this.parseEvents(params, websocketUtils.unpackMessage(textDecoder(chunk)))
- }
- }
-
- async sendWs() {
- const wsConfig: ConstructorParameters<typeof WebSocketAsPromised>[1] = {
- packMessage: websocketUtils.packMessage,
- unpackMessage: websocketUtils.unpackMessage,
- createWebSocket: (url) => new WebSocket(url, {
- headers: {
- 'accept-language': 'zh-CN,zh;q=0.9',
- 'cache-control': 'no-cache',
- 'User-Agent': this.ua,
- pragma: 'no-cache',
- cookie: this.cookie,
- }
- })
- }
- const wsp = new WebSocketAsPromised('wss://sydney.bing.com/sydney/ChatHub', wsConfig)
-
- wsp.open().then(() => {
- wsp.sendPacked({ protocol: 'json', version: 1 })
- wsp.sendPacked({ type: 6 })
- wsp.sendPacked(BingWebBot.buildChatRequest(this.conversationContext!))
- })
-
- return wsp
- }
-
- private async useWs(params: Params) {
- const wsp = await this.sendWs()
- const watchDog = new WatchDog()
- wsp.onUnpackedMessage.addListener((events) => {
- watchDog.watch(() => {
- wsp.sendPacked({ type: 6 })
- })
- this.parseEvents(params, events)
- })
-
- wsp.onClose.addListener(() => {
- watchDog.reset()
- params.onEvent({ type: 'DONE' })
- wsp.removeAllListeners()
- })
-
- params.signal?.addEventListener('abort', () => {
- wsp.removeAllListeners()
- wsp.close()
- })
- }
-
- private async createImage(prompt: string, id: string) {
- try {
- const headers = {
- 'Accept-Encoding': 'gzip, deflate, br, zsdch',
- 'User-Agent': this.ua,
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- cookie: this.cookie,
- }
- const query = new URLSearchParams({
- prompt,
- id
- })
- const response = await fetch(this.endpoint + '/api/image?' + query.toString(),
- {
- method: 'POST',
- headers,
- mode: 'cors',
- credentials: 'include'
- })
- .then(res => res.text())
- if (response) {
- this.lastText += '\n' + response
- }
- } catch (err) {
- console.error('Create Image Error', err)
- }
- }
-
- private buildKnowledgeApiPayload(imageUrl: string, conversationStyle: BingConversationStyle) {
- const imageInfo: ImageInfo = {}
- let imageBase64: string | undefined = undefined
- const knowledgeRequest = {
- imageInfo,
- knowledgeRequest: {
- invokedSkills: [
- 'ImageById'
- ],
- subscriptionId: 'Bing.Chat.Multimodal',
- invokedSkillsRequestData: {
- enableFaceBlur: true
- },
- convoData: {
- convoid: this.conversationContext?.conversationId,
- convotone: conversationStyle,
- }
- },
- }
-
- if (imageUrl.startsWith('data:image/')) {
- imageBase64 = imageUrl.replace('data:image/', '');
- const partIndex = imageBase64.indexOf(',')
- if (partIndex) {
- imageBase64 = imageBase64.substring(partIndex + 1)
- }
- } else {
- imageInfo.url = imageUrl
- }
- return { knowledgeRequest, imageBase64 }
- }
-
- async uploadImage(imageUrl: string, conversationStyle: BingConversationStyle = BingConversationStyle.Creative): Promise<KBlobResponse | void> {
- if (!imageUrl) {
- return
- }
- await this.createContext(conversationStyle)
- const payload = this.buildKnowledgeApiPayload(imageUrl, conversationStyle)
-
- const response = await fetch(this.endpoint + '/api/kblob',
- {
- headers: {
- 'Content-Type': 'application/json',
- },
- method: 'POST',
- mode: 'cors',
- credentials: 'include',
- body: JSON.stringify(payload),
- })
- .then(res => res.json())
- .catch(e => {
- console.log('Error', e)
- })
- return response
- }
-
- private async generateContent(message: ChatResponseMessage) {
- if (message.contentType === 'IMAGE') {
- this.asyncTasks.push(this.createImage(message.text, message.messageId))
- }
- }
-
- private async parseEvents(params: Params, events: any) {
- const conversation = this.conversationContext!
-
- events?.forEach(async (event: ChatUpdateCompleteResponse) => {
- debug('bing event', event)
- if (event.type === 3) {
- await Promise.all(this.asyncTasks)
- this.asyncTasks = []
- params.onEvent({ type: 'UPDATE_ANSWER', data: { text: this.lastText } })
- params.onEvent({ type: 'DONE' })
- conversation.invocationId = parseInt(event.invocationId, 10) + 1
- } else if (event.type === 1) {
- const messages = event.arguments[0].messages
- if (messages) {
- const text = convertMessageToMarkdown(messages[0])
- this.lastText = text
- params.onEvent({ type: 'UPDATE_ANSWER', data: { text, spokenText: messages[0].text, throttling: event.arguments[0].throttling } })
- }
- } else if (event.type === 2) {
- const messages = event.item.messages as ChatResponseMessage[] | undefined
- if (!messages) {
- params.onEvent({
- type: 'ERROR',
- error: new ChatError(
- event.item.result.error || 'Unknown error',
- event.item.result.value === 'Throttled' ? ErrorCode.THROTTLE_LIMIT
- : event.item.result.value === 'CaptchaChallenge' ? (this.conversationContext?.conversationId?.includes('BingProdUnAuthenticatedUsers') ? ErrorCode.BING_UNAUTHORIZED : ErrorCode.BING_CAPTCHA)
- : ErrorCode.UNKOWN_ERROR
- ),
- })
- return
- }
- const limited = messages.some((message) =>
- message.contentOrigin === 'TurnLimiter'
- || message.messageType === 'Disengaged'
- )
- if (limited) {
- params.onEvent({
- type: 'ERROR',
- error: new ChatError(
- 'Sorry, you have reached chat limit in this conversation.',
- ErrorCode.CONVERSATION_LIMIT,
- ),
- })
- return
- }
-
- const lastMessage = event.item.messages.at(-1) as ChatResponseMessage
- const specialMessage = event.item.messages.find(message => message.author === 'bot' && message.contentType === 'IMAGE')
- if (specialMessage) {
- this.generateContent(specialMessage)
- }
-
- if (lastMessage) {
- const text = convertMessageToMarkdown(lastMessage)
- this.lastText = text
- params.onEvent({
- type: 'UPDATE_ANSWER',
- data: { text, throttling: event.item.throttling, suggestedResponses: lastMessage.suggestedResponses, sourceAttributions: lastMessage.sourceAttributions },
- })
- }
- }
- })
- }
-
- resetConversation() {
- this.conversationContext = undefined
- }
-}
diff --git a/spaces/LeoLeoLeo1/ChuanhuChatGPT/chatgpt - windows.bat b/spaces/LeoLeoLeo1/ChuanhuChatGPT/chatgpt - windows.bat
deleted file mode 100644
index 0b78fdc3a559abd692e3a9e9af5e482124d13a99..0000000000000000000000000000000000000000
--- a/spaces/LeoLeoLeo1/ChuanhuChatGPT/chatgpt - windows.bat
+++ /dev/null
@@ -1,14 +0,0 @@
-@echo off
-echo Opening ChuanhuChatGPT...
-
-REM Open powershell via bat
-start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py"
-
-REM Give the server a few seconds to start before opening http://127.0.0.1:7860/
-ping -n 5 127.0.0.1>nul
-
-REM access ChuanhuChatGPT via your default browser
-start "" "http://127.0.0.1:7860/"
-
-
-echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/).
\ No newline at end of file
diff --git a/spaces/Luelll/ChuanhuChatGPT/assets/custom.css b/spaces/Luelll/ChuanhuChatGPT/assets/custom.css
deleted file mode 100644
index c094258d4a9e61a01ec3f58a2549315d2614c709..0000000000000000000000000000000000000000
--- a/spaces/Luelll/ChuanhuChatGPT/assets/custom.css
+++ /dev/null
@@ -1,500 +0,0 @@
-:root {
- --chatbot-color-light: #000000;
- --chatbot-color-dark: #FFFFFF;
- --chatbot-background-color-light: #F3F3F3;
- --chatbot-background-color-dark: #121111;
- --message-user-background-color-light: #95EC69;
- --message-user-background-color-dark: #26B561;
- --message-bot-background-color-light: #FFFFFF;
- --message-bot-background-color-dark: #2C2C2C;
-}
-
-#app_title {
- font-weight: var(--prose-header-text-weight);
- font-size: var(--text-xxl);
- line-height: 1.3;
- text-align: left;
- margin-top: 6px;
- white-space: nowrap;
-}
-#description {
- text-align: center;
- margin: 32px 0 4px 0;
-}
-
-/* gradio footer info */
-footer {
- /* display: none !important; */
- margin-top: .2em !important;
- font-size: 85%;
-}
-#footer {
- text-align: center;
-}
-#footer div {
- display: inline-block;
-}
-#footer .versions{
- font-size: 85%;
- opacity: 0.60;
-}
-
-#float_display {
- position: absolute;
- max-height: 30px;
-}
-/* user_info */
-#user_info {
- white-space: nowrap;
- position: absolute; left: 8em; top: .2em;
- z-index: var(--layer-2);
- box-shadow: var(--block-shadow);
- border: none; border-radius: var(--block-label-radius);
- background: var(--color-accent);
- padding: var(--block-label-padding);
- font-size: var(--block-label-text-size); line-height: var(--line-sm);
- width: auto; min-height: 30px!important;
- opacity: 1;
- transition: opacity 0.3s ease-in-out;
-}
-#user_info .wrap {
- opacity: 0;
-}
-#user_info p {
- color: white;
- font-weight: var(--block-label-text-weight);
-}
-#user_info.hideK {
- opacity: 0;
- transition: opacity 1s ease-in-out;
-}
-
-/* status_display */
-#status_display {
- display: flex;
- min-height: 2em;
- align-items: flex-end;
- justify-content: flex-end;
-}
-#status_display p {
- font-size: .85em;
- font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace;
- /* On Windows the monospace fallback for Chinese is NSimSun, which looks bad, so compromise with Microsoft YaHei */
- color: var(--body-text-color-subdued);
-}
-
-#status_display {
- transition: all 0.6s;
-}
-#chuanhu_chatbot {
- transition: height 0.3s ease;
-}
-
-/* usage_display */
-.insert_block {
- position: relative;
- margin: 0;
- padding: .5em 1em;
- box-shadow: var(--block-shadow);
- border-width: var(--block-border-width);
- border-color: var(--block-border-color);
- border-radius: var(--block-radius);
- background: var(--block-background-fill);
- width: 100%;
- line-height: var(--line-sm);
- min-height: 2em;
-}
-#usage_display p, #usage_display span {
- margin: 0;
- font-size: .85em;
- color: var(--body-text-color-subdued);
-}
-.progress-bar {
- background-color: var(--input-background-fill);;
- margin: .5em 0 !important;
- height: 20px;
- border-radius: 10px;
- overflow: hidden;
-}
-.progress {
- background-color: var(--block-title-background-fill);
- height: 100%;
- border-radius: 10px;
- text-align: right;
- transition: width 0.5s ease-in-out;
-}
-.progress-text {
- /* color: white; */
- color: var(--color-accent) !important;
- font-size: 1em !important;
- font-weight: bold;
- padding-right: 10px;
- line-height: 20px;
-}
-
-.apSwitch {
- top: 2px;
- display: inline-block;
- height: 24px;
- position: relative;
- width: 48px;
- border-radius: 12px;
-}
-.apSwitch input {
- display: none !important;
-}
-.apSlider {
- background-color: var(--neutral-200);
- bottom: 0;
- cursor: pointer;
- left: 0;
- position: absolute;
- right: 0;
- top: 0;
- transition: .4s;
- font-size: 18px;
- border-radius: 12px;
-}
-.apSlider::before {
- bottom: -1.5px;
- left: 1px;
- position: absolute;
- transition: .4s;
- content: "🌞";
-}
-input:checked + .apSlider {
- background-color: var(--primary-600);
-}
-input:checked + .apSlider::before {
- transform: translateX(23px);
- content:"🌚";
-}
-
-/* Override Slider Styles (for webkit browsers like Safari and Chrome)
- * Really hoping this proposal lands soon: https://github.com/w3c/csswg-drafts/issues/4410
- * Range sliders are still far too inconsistent across platforms
- */
-input[type="range"] {
- -webkit-appearance: none;
- height: 4px;
- background: var(--input-background-fill);
- border-radius: 5px;
- background-image: linear-gradient(var(--primary-500),var(--primary-500));
- background-size: 0% 100%;
- background-repeat: no-repeat;
-}
-input[type="range"]::-webkit-slider-thumb {
- -webkit-appearance: none;
- height: 20px;
- width: 20px;
- border-radius: 50%;
- border: solid 0.5px #ddd;
- background-color: white;
- cursor: ew-resize;
- box-shadow: var(--input-shadow);
- transition: background-color .1s ease;
-}
-input[type="range"]::-webkit-slider-thumb:hover {
- background: var(--neutral-50);
-}
-input[type=range]::-webkit-slider-runnable-track {
- -webkit-appearance: none;
- box-shadow: none;
- border: none;
- background: transparent;
-}
-
-#submit_btn, #cancel_btn {
- height: 42px !important;
-}
-#submit_btn::before {
- content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E");
- height: 21px;
-}
-#cancel_btn::before {
- content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E");
- height: 21px;
-}
-/* list */
-ol:not(.options), ul:not(.options) {
- padding-inline-start: 2em !important;
-}
-
-/* Light theme (default) */
-#chuanhu_chatbot {
- background-color: var(--chatbot-background-color-light) !important;
- color: var(--chatbot-color-light) !important;
-}
-[data-testid = "bot"] {
- background-color: var(--message-bot-background-color-light) !important;
-}
-[data-testid = "user"] {
- background-color: var(--message-user-background-color-light) !important;
-}
-/* Dark theme */
-.dark #chuanhu_chatbot {
- background-color: var(--chatbot-background-color-dark) !important;
- color: var(--chatbot-color-dark) !important;
-}
-.dark [data-testid = "bot"] {
- background-color: var(--message-bot-background-color-dark) !important;
-}
-.dark [data-testid = "user"] {
- background-color: var(--message-user-background-color-dark) !important;
-}
-
-/* Devices with screen width >= 500px */
-/* update on 2023.4.8: fine-grained height adjustments now live in JavaScript */
-@media screen and (min-width: 500px) {
- #chuanhu_chatbot {
- height: calc(100vh - 200px);
- }
- #chuanhu_chatbot .wrap {
- max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
- }
-}
-/* Devices with screen width < 500px */
-@media screen and (max-width: 499px) {
- #chuanhu_chatbot {
- height: calc(100vh - 140px);
- }
- #chuanhu_chatbot .wrap {
- max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
- }
- [data-testid = "bot"] {
- max-width: 95% !important;
- }
- #app_title h1{
- letter-spacing: -1px; font-size: 22px;
- }
-}
-#chuanhu_chatbot .wrap {
- overflow-x: hidden;
-}
-/* Chat bubbles */
-.message {
- border-radius: var(--radius-xl) !important;
- border: none;
- padding: var(--spacing-xl) !important;
- font-size: var(--text-md) !important;
- line-height: var(--line-md) !important;
- min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
- min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
-}
-[data-testid = "bot"] {
- max-width: 85%;
- border-bottom-left-radius: 0 !important;
-}
-[data-testid = "user"] {
- max-width: 85%;
- width: auto !important;
- border-bottom-right-radius: 0 !important;
-}
-
-.message p {
- margin-top: 0.6em !important;
- margin-bottom: 0.6em !important;
-}
-.message p:first-child { margin-top: 0 !important; }
-.message p:last-of-type { margin-bottom: 0 !important; }
-
-.message .md-message {
- display: block;
- padding: 0 !important;
-}
-.message .raw-message {
- display: block;
- padding: 0 !important;
- white-space: pre-wrap;
-}
-.raw-message.hideM, .md-message.hideM {
- display: none;
-}
-
-/* custom buttons */
-.chuanhu-btn {
- border-radius: 5px;
- /* background-color: #E6E6E6 !important; */
- color: rgba(120, 120, 120, 0.64) !important;
- padding: 4px !important;
- position: absolute;
- right: -22px;
- cursor: pointer !important;
- transition: color .2s ease, background-color .2s ease;
-}
-.chuanhu-btn:hover {
- background-color: rgba(167, 167, 167, 0.25) !important;
- color: unset !important;
-}
-.chuanhu-btn:active {
- background-color: rgba(167, 167, 167, 0.5) !important;
-}
-.chuanhu-btn:focus {
- outline: none;
-}
-.copy-bot-btn {
- /* top: 18px; */
- bottom: 0;
-}
-.toggle-md-btn {
- /* top: 0; */
- bottom: 20px;
-}
-.copy-code-btn {
- position: relative;
- float: right;
- font-size: 1em;
- cursor: pointer;
-}
-
-.message-wrap>div img{
- border-radius: 10px !important;
-}
-
-/* history message */
-.wrap>.history-message {
- padding: 10px !important;
-}
-.history-message {
- /* padding: 0 !important; */
- opacity: 80%;
- display: flex;
- flex-direction: column;
-}
-.history-message>.history-message {
- padding: 0 !important;
-}
-.history-message>.message-wrap {
- padding: 0 !important;
- margin-bottom: 16px;
-}
-.history-message>.message {
- margin-bottom: 16px;
-}
-.wrap>.history-message::after {
- content: "";
- display: block;
- height: 2px;
- background-color: var(--body-text-color-subdued);
- margin-bottom: 10px;
- margin-top: -10px;
- clear: both;
-}
-.wrap>.history-message>:last-child::after {
- content: "仅供查看";
- display: block;
- text-align: center;
- color: var(--body-text-color-subdued);
- font-size: 0.8em;
-}
-
-/* Tables */
-table {
- margin: 1em 0;
- border-collapse: collapse;
- empty-cells: show;
-}
-td,th {
- border: 1.2px solid var(--border-color-primary) !important;
- padding: 0.2em;
-}
-thead {
- background-color: rgba(175,184,193,0.2);
-}
-thead th {
- padding: .5em .2em;
-}
-/* Inline code */
-code {
- display: inline;
- white-space: break-spaces;
- border-radius: 6px;
- margin: 0 2px 0 2px;
- padding: .2em .4em .1em .4em;
- background-color: rgba(175,184,193,0.2);
-}
-/* Code blocks */
-pre code {
- display: block;
- overflow: auto;
- white-space: pre;
- background-color: hsla(0, 0%, 0%, 80%)!important;
- border-radius: 10px;
- padding: 1.4em 1.2em 0em 1.4em;
- margin: 0.6em 2em 1em 0.2em;
- color: #FFF;
- box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
-}
-.message pre {
- padding: 0 !important;
-}
-/* Code highlighting styles */
-.highlight .hll { background-color: #49483e }
-.highlight .c { color: #75715e } /* Comment */
-.highlight .err { color: #960050; background-color: #1e0010 } /* Error */
-.highlight .k { color: #66d9ef } /* Keyword */
-.highlight .l { color: #ae81ff } /* Literal */
-.highlight .n { color: #f8f8f2 } /* Name */
-.highlight .o { color: #f92672 } /* Operator */
-.highlight .p { color: #f8f8f2 } /* Punctuation */
-.highlight .ch { color: #75715e } /* Comment.Hashbang */
-.highlight .cm { color: #75715e } /* Comment.Multiline */
-.highlight .cp { color: #75715e } /* Comment.Preproc */
-.highlight .cpf { color: #75715e } /* Comment.PreprocFile */
-.highlight .c1 { color: #75715e } /* Comment.Single */
-.highlight .cs { color: #75715e } /* Comment.Special */
-.highlight .gd { color: #f92672 } /* Generic.Deleted */
-.highlight .ge { font-style: italic } /* Generic.Emph */
-.highlight .gi { color: #a6e22e } /* Generic.Inserted */
-.highlight .gs { font-weight: bold } /* Generic.Strong */
-.highlight .gu { color: #75715e } /* Generic.Subheading */
-.highlight .kc { color: #66d9ef } /* Keyword.Constant */
-.highlight .kd { color: #66d9ef } /* Keyword.Declaration */
-.highlight .kn { color: #f92672 } /* Keyword.Namespace */
-.highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
-.highlight .kr { color: #66d9ef } /* Keyword.Reserved */
-.highlight .kt { color: #66d9ef } /* Keyword.Type */
-.highlight .ld { color: #e6db74 } /* Literal.Date */
-.highlight .m { color: #ae81ff } /* Literal.Number */
-.highlight .s { color: #e6db74 } /* Literal.String */
-.highlight .na { color: #a6e22e } /* Name.Attribute */
-.highlight .nb { color: #f8f8f2 } /* Name.Builtin */
-.highlight .nc { color: #a6e22e } /* Name.Class */
-.highlight .no { color: #66d9ef } /* Name.Constant */
-.highlight .nd { color: #a6e22e } /* Name.Decorator */
-.highlight .ni { color: #f8f8f2 } /* Name.Entity */
-.highlight .ne { color: #a6e22e } /* Name.Exception */
-.highlight .nf { color: #a6e22e } /* Name.Function */
-.highlight .nl { color: #f8f8f2 } /* Name.Label */
-.highlight .nn { color: #f8f8f2 } /* Name.Namespace */
-.highlight .nx { color: #a6e22e } /* Name.Other */
-.highlight .py { color: #f8f8f2 } /* Name.Property */
-.highlight .nt { color: #f92672 } /* Name.Tag */
-.highlight .nv { color: #f8f8f2 } /* Name.Variable */
-.highlight .ow { color: #f92672 } /* Operator.Word */
-.highlight .w { color: #f8f8f2 } /* Text.Whitespace */
-.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
-.highlight .mf { color: #ae81ff } /* Literal.Number.Float */
-.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
-.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
-.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
-.highlight .sa { color: #e6db74 } /* Literal.String.Affix */
-.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
-.highlight .sc { color: #e6db74 } /* Literal.String.Char */
-.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
-.highlight .sd { color: #e6db74 } /* Literal.String.Doc */
-.highlight .s2 { color: #e6db74 } /* Literal.String.Double */
-.highlight .se { color: #ae81ff } /* Literal.String.Escape */
-.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
-.highlight .si { color: #e6db74 } /* Literal.String.Interpol */
-.highlight .sx { color: #e6db74 } /* Literal.String.Other */
-.highlight .sr { color: #e6db74 } /* Literal.String.Regex */
-.highlight .s1 { color: #e6db74 } /* Literal.String.Single */
-.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
-.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
-.highlight .fm { color: #a6e22e } /* Name.Function.Magic */
-.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
-.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
-.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
-.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
-.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
diff --git a/spaces/Mahiruoshi/Lovelive-Nijigasaku-Chat-iSTFT-GPT3/monotonic_align/setup.py b/spaces/Mahiruoshi/Lovelive-Nijigasaku-Chat-iSTFT-GPT3/monotonic_align/setup.py
deleted file mode 100644
index eb6008bdab38e472c2bd88b764dce1a8e3229a42..0000000000000000000000000000000000000000
--- a/spaces/Mahiruoshi/Lovelive-Nijigasaku-Chat-iSTFT-GPT3/monotonic_align/setup.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# cython:language_level=3
-from distutils.core import setup
-from Cython.Build import cythonize
-import numpy
-
-setup(
- name = 'monotonic_align',
- ext_modules = cythonize("core.pyx"),
- include_dirs=[numpy.get_include()]
-)
diff --git a/spaces/Marshalls/testmtd/models/residualflower2_model.py b/spaces/Marshalls/testmtd/models/residualflower2_model.py
deleted file mode 100644
index 987621a950dbb8856dab926ca1e075c2b56640c1..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/models/residualflower2_model.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import torch
-from torch import nn
-from .transformer import BasicTransformerModel
-from models import BaseModel
-from models.flowplusplus import FlowPlusPlus
-import ast
-
-from .util.generation import autoregressive_generation_multimodal
-import argparse
-from argparse import Namespace
-import models
-
-class Residualflower2Model(BaseModel):
- def __init__(self, opt):
- super().__init__(opt)
-
- opt_vars = vars(opt)
- mean_vars = self.get_argvars(opt.mean_model, opt)
- mean_opt = opt_vars.copy()
- for k,v in mean_vars.items():
- val = mean_opt["mean_"+k]
- if k not in mean_opt:
- mean_opt[k] = val
- del mean_opt["mean_"+k]
- mean_opt = Namespace(**mean_opt)
- self.mean_model = models.create_model_by_name(opt.mean_model, mean_opt)
-
- residual_vars = self.get_argvars(opt.residual_model, opt)
- residual_opt = opt_vars.copy()
- for k,v in residual_vars.items():
- val = residual_opt["residual_"+k]
- if k not in residual_opt:
- residual_opt[k] = val
- del residual_opt["residual_"+k]
- residual_opt = Namespace(**residual_opt)
- self.residual_model = models.create_model_by_name(opt.residual_model, residual_opt)
-
- self.mean_loss = nn.MSELoss()
- self.mse_loss = 0
- self.nll_loss = 0
-
- def name(self):
- return "Transflower"
-
- @staticmethod
- def get_argvars(model_name, opt):
- temp_parser = argparse.ArgumentParser()
- model_option_setter = models.get_option_setter(model_name)
- vs = vars(model_option_setter(temp_parser, opt).parse_args([]))
- return vs
-
- @staticmethod
- def modify_commandline_options(parser, opt):
- parser.add_argument('--dropout', type=float, default=0.1)
- parser.add_argument('--mean_model', type=str, default="transformer")
- parser.add_argument('--residual_model', type=str, default="transflower")
- opt2, _ = parser.parse_known_args()
- mean_vars = Residualflower2Model.get_argvars(opt2.mean_model, opt)
- for k,v in mean_vars.items():
- # print(k)
- if type(v) != type(True):
- if type(v) != type(None):
- parser.add_argument('--mean_'+k, type=type(v), default=v)
- else:
- parser.add_argument('--mean_'+k, default=v)
- else:
- parser.add_argument('--mean_'+k, action="store_true")
- residual_vars = Residualflower2Model.get_argvars(opt2.residual_model, opt)
- for k,v in residual_vars.items():
- if type(v) != type(True):
- if type(v) != type(None):
- parser.add_argument('--residual_'+k, type=type(v), default=v)
- else:
- parser.add_argument('--residual_'+k, default=v)
- else:
- parser.add_argument('--residual_'+k, action="store_true")
- return parser
-
- def forward(self, data):
- # in lightning, forward defines the prediction/inference actions
- predicted_means = self.mean_model(data)
- predicted_residuals = self.residual_model(data)
- outputs = []
- for i, mod in enumerate(self.output_mods):
- outputs.append(predicted_means[i]+predicted_residuals[i])
- return outputs
-
- #def generate(self,features, teacher_forcing=False):
- # inputs_ = []
- # for i,mod in enumerate(self.input_mods):
- # input_ = features["in_"+mod]
- # input_ = torch.from_numpy(input_).float().cuda()
- # input_shape = input_.shape
- # input_ = input_.reshape((input_shape[0]*input_shape[1], input_shape[2], input_shape[3])).permute(2,0,1).to(self.device)
- # inputs_.append(input_)
- # output_seq = autoregressive_generation_multimodal(inputs_, self, autoreg_mods=self.output_mods, teacher_forcing=teacher_forcing)
- # return output_seq
-
- def training_step(self, batch, batch_idx):
- self.set_inputs(batch)
-
- self.mean_model.set_inputs(batch)
- predicted_means = self.mean_model(self.inputs)
- mse_loss = 0
- for i, mod in enumerate(self.output_mods):
- mse_loss += 100*self.mean_loss(predicted_means[i], self.targets[i])
-
- for i, mod in enumerate(self.output_mods):
- # import pdb;pdb.set_trace()
- batch["out_"+mod] = batch["out_"+mod] - predicted_means[i].permute(1,0,2)
-
- # self.residual_model.set_inputs(batch)
- nll_loss = self.residual_model.training_step(batch, batch_idx)
- loss = mse_loss + nll_loss
- self.mse_loss = mse_loss
- self.nll_loss = nll_loss
- # print("mse_loss: ", mse_loss)
- # print("nll_loss: ", nll_loss)
- self.log('mse_loss', mse_loss)
- self.log('nll_loss', nll_loss)
- self.log('loss', loss)
- return loss
-
- def test_step(self, batch, batch_idx):
- self.eval()
- loss = self.training_step(batch, batch_idx)
- # print(loss)
- return {"test_loss": loss, "test_mse_loss": self.mse_loss, "test_nll_loss": self.nll_loss}
-
- def test_epoch_end(self, outputs):
- avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
- avg_mse_loss = torch.stack([x['test_mse_loss'] for x in outputs]).mean()
- avg_nll_loss = torch.stack([x['test_nll_loss'] for x in outputs]).mean()
- logs = {'test_loss': avg_loss, 'test_mse_loss': avg_mse_loss, 'test_nll_loss': avg_nll_loss}
-
- return {'log': logs}
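The model above splits each target into a deterministic mean (trained with a scaled MSE) plus a residual that a second, likelihood-based model must explain, and the two losses are simply added. A toy sketch of that objective with linear stand-ins and a Gaussian NLL in place of the flow likelihood (this illustrates only the shape of the loss, not the original models):

```python
import torch
from torch import nn

mean_model = nn.Linear(16, 8)      # stands in for the mean (e.g. transformer) model
residual_model = nn.Linear(16, 8)  # stands in for the residual (e.g. flow) model

x = torch.randn(32, 16)
target = torch.randn(32, 8)

pred_mean = mean_model(x)
mse_loss = 100 * nn.functional.mse_loss(pred_mean, target)  # same 100x weighting as above

residual = target - pred_mean                               # what the residual model must explain
nll_loss = nn.functional.gaussian_nll_loss(
    residual_model(x), residual, torch.ones_like(residual)) # Gaussian NLL as a stand-in

loss = mse_loss + nll_loss                                  # additive objective, as in training_step
loss.backward()
```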
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/roipoint_pool3d.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/roipoint_pool3d.py
deleted file mode 100644
index 0a21412c0728431c04b84245bc2e3109eea9aefc..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/roipoint_pool3d.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from torch import nn as nn
-from torch.autograd import Function
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', ['roipoint_pool3d_forward'])
-
-
-class RoIPointPool3d(nn.Module):
- """Encode the geometry-specific features of each 3D proposal.
-
- Please refer to `Paper of PartA2 <https://arxiv.org/abs/1907.03670>`_
- for more details.
-
- Args:
- num_sampled_points (int, optional): Number of samples in each roi.
- Default: 512.
- """
-
- def __init__(self, num_sampled_points=512):
- super().__init__()
- self.num_sampled_points = num_sampled_points
-
- def forward(self, points, point_features, boxes3d):
- """
- Args:
- points (torch.Tensor): Input points whose shape is (B, N, C).
- point_features (torch.Tensor): Features of input points whose shape
- is (B, N, C).
- boxes3d (torch.Tensor): Input bounding boxes whose shape is (B, M, 7).
-
- Returns:
- pooled_features (torch.Tensor): The output pooled features whose
- shape is (B, M, 512, 3 + C).
- pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M).
- """
- return RoIPointPool3dFunction.apply(points, point_features, boxes3d,
- self.num_sampled_points)
-
-
-class RoIPointPool3dFunction(Function):
-
- @staticmethod
- def forward(ctx, points, point_features, boxes3d, num_sampled_points=512):
- """
- Args:
- points (torch.Tensor): Input points whose shape is (B, N, C).
- point_features (torch.Tensor): Features of input points whose shape
- is (B, N, C).
- boxes3d (torch.Tensor): Input bounding boxes whose shape is (B, M, 7).
- num_sampled_points (int, optional): The num of sampled points.
- Default: 512.
-
- Returns:
- pooled_features (torch.Tensor): The output pooled features whose
- shape is (B, M, 512, 3 + C).
- pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M).
- """
- assert len(points.shape) == 3 and points.shape[2] == 3
- batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[
- 1], point_features.shape[2]
- pooled_boxes3d = boxes3d.view(batch_size, -1, 7)
- pooled_features = point_features.new_zeros(
- (batch_size, boxes_num, num_sampled_points, 3 + feature_len))
- pooled_empty_flag = point_features.new_zeros(
- (batch_size, boxes_num)).int()
-
- ext_module.roipoint_pool3d_forward(points.contiguous(),
- pooled_boxes3d.contiguous(),
- point_features.contiguous(),
- pooled_features, pooled_empty_flag)
-
- return pooled_features, pooled_empty_flag
-
- @staticmethod
- def backward(ctx, grad_out):
- raise NotImplementedError
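A minimal usage sketch of the pooling module above, assuming the Space's bundled mmcv (with its compiled `_ext` CUDA ops) is importable and a GPU is available; shapes follow the docstring, values are random:

```python
import torch
from annotator.uniformer.mmcv.ops.roipoint_pool3d import RoIPointPool3d  # module shown above

B, N, M, C = 2, 1024, 4, 3             # batch, points, boxes, extra feature channels
points = torch.rand(B, N, 3).cuda()    # xyz coordinates (last dim must be 3)
point_features = torch.rand(B, N, C).cuda()
boxes3d = torch.rand(B, M, 7).cuda()   # (x, y, z, dx, dy, dz, heading)

pool = RoIPointPool3d(num_sampled_points=512)
pooled_features, pooled_empty_flag = pool(points, point_features, boxes3d)
print(pooled_features.shape)    # (B, M, 512, 3 + C)
print(pooled_empty_flag.shape)  # (B, M)
```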
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/core/seg/builder.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/core/seg/builder.py
deleted file mode 100644
index db61f03d4abb2072f2532ce4429c0842495e015b..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/core/seg/builder.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
-
-PIXEL_SAMPLERS = Registry('pixel sampler')
-
-
-def build_pixel_sampler(cfg, **default_args):
- """Build pixel sampler for segmentation map."""
- return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
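A short sketch of how the registry above is meant to be used — register a sampler class, then build it from a config dict. The `RandomPixelSampler` here is purely illustrative, not something mmseg ships; the import assumes the Space's bundled packages are on the path:

```python
from annotator.uniformer.mmseg.core.seg.builder import PIXEL_SAMPLERS, build_pixel_sampler

@PIXEL_SAMPLERS.register_module()
class RandomPixelSampler:
    """Illustrative sampler: keeps a fixed fraction of pixels (not part of mmseg)."""

    def __init__(self, ratio=0.5):
        self.ratio = ratio

# build_from_cfg looks up cfg['type'] in the registry and forwards the rest as kwargs.
sampler = build_pixel_sampler(dict(type='RandomPixelSampler', ratio=0.25))
print(type(sampler).__name__, sampler.ratio)  # RandomPixelSampler 0.25
```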
diff --git a/spaces/MingGatsby/Grounding_DINO_demo/groundingdino/util/logger.py b/spaces/MingGatsby/Grounding_DINO_demo/groundingdino/util/logger.py
deleted file mode 100644
index 18145f54c927abd59b95f3fa6e6da8002bc2ce97..0000000000000000000000000000000000000000
--- a/spaces/MingGatsby/Grounding_DINO_demo/groundingdino/util/logger.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import functools
-import logging
-import os
-import sys
-
-from termcolor import colored
-
-
-class _ColorfulFormatter(logging.Formatter):
- def __init__(self, *args, **kwargs):
- self._root_name = kwargs.pop("root_name") + "."
- self._abbrev_name = kwargs.pop("abbrev_name", "")
- if len(self._abbrev_name):
- self._abbrev_name = self._abbrev_name + "."
- super(_ColorfulFormatter, self).__init__(*args, **kwargs)
-
- def formatMessage(self, record):
- record.name = record.name.replace(self._root_name, self._abbrev_name)
- log = super(_ColorfulFormatter, self).formatMessage(record)
- if record.levelno == logging.WARNING:
- prefix = colored("WARNING", "red", attrs=["blink"])
- elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
- prefix = colored("ERROR", "red", attrs=["blink", "underline"])
- else:
- return log
- return prefix + " " + log
-
-
-# so that calling setup_logger multiple times won't add many handlers
-@functools.lru_cache()
-def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None):
- """
- Initialize the detectron2 logger and set its verbosity level to "INFO".
-
- Args:
- output (str): a file name or a directory to save log. If None, will not save log file.
- If ends with ".txt" or ".log", assumed to be a file name.
- Otherwise, logs will be saved to `output/log.txt`.
- name (str): the root module name of this logger
-
- Returns:
- logging.Logger: a logger
- """
- logger = logging.getLogger(name)
- logger.setLevel(logging.DEBUG)
- logger.propagate = False
-
- if abbrev_name is None:
- abbrev_name = name
-
- plain_formatter = logging.Formatter(
- "[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S"
- )
- # stdout logging: master only
- if distributed_rank == 0:
- ch = logging.StreamHandler(stream=sys.stdout)
- ch.setLevel(logging.DEBUG)
- if color:
- formatter = _ColorfulFormatter(
- colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s",
- datefmt="%m/%d %H:%M:%S",
- root_name=name,
- abbrev_name=str(abbrev_name),
- )
- else:
- formatter = plain_formatter
- ch.setFormatter(formatter)
- logger.addHandler(ch)
-
- # file logging: all workers
- if output is not None:
- if output.endswith(".txt") or output.endswith(".log"):
- filename = output
- else:
- filename = os.path.join(output, "log.txt")
- if distributed_rank > 0:
- filename = filename + f".rank{distributed_rank}"
- os.makedirs(os.path.dirname(filename), exist_ok=True)
-
- fh = logging.StreamHandler(_cached_log_stream(filename))
- fh.setLevel(logging.DEBUG)
- fh.setFormatter(plain_formatter)
- logger.addHandler(fh)
-
- return logger
-
-
-# cache the opened file object, so that different calls to `setup_logger`
-# with the same file name can safely write to the same file.
-@functools.lru_cache(maxsize=None)
-def _cached_log_stream(filename):
- return open(filename, "a")
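A minimal usage sketch of `setup_logger` above (the import path mirrors the file's location in this Space). Because the function is wrapped in `functools.lru_cache`, calling it again with identical arguments returns the already-configured logger instead of stacking handlers:

```python
from groundingdino.util.logger import setup_logger

logger = setup_logger(output="./logs", distributed_rank=0, name="groundingdino")
logger.info("model loaded")                       # plain line on stdout and in ./logs/log.txt
logger.warning("colored when color=True and stdout is a TTY")

# Identical arguments hit the lru_cache, so the same logger object comes back.
assert setup_logger(output="./logs", distributed_rank=0, name="groundingdino") is logger
```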
diff --git a/spaces/Miuzarte/SUI-svc-4.0/hubert/hubert_model_onnx.py b/spaces/Miuzarte/SUI-svc-4.0/hubert/hubert_model_onnx.py
deleted file mode 100644
index d18f3c2a0fc29592a573a9780308d38f059640b9..0000000000000000000000000000000000000000
--- a/spaces/Miuzarte/SUI-svc-4.0/hubert/hubert_model_onnx.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import copy
-import random
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as t_func
-from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
-
-
-class Hubert(nn.Module):
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
- super().__init__()
- self._mask = mask
- self.feature_extractor = FeatureExtractor()
- self.feature_projection = FeatureProjection()
- self.positional_embedding = PositionalConvEmbedding()
- self.norm = nn.LayerNorm(768)
- self.dropout = nn.Dropout(0.1)
- self.encoder = TransformerEncoder(
- nn.TransformerEncoderLayer(
- 768, 12, 3072, activation="gelu", batch_first=True
- ),
- 12,
- )
- self.proj = nn.Linear(768, 256)
-
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
-
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- mask = None
- if self.training and self._mask:
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
- x[mask] = self.masked_spec_embed.to(x.dtype)
- return x, mask
-
- def encode(
- self, x: torch.Tensor, layer: Optional[int] = None
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- x = self.feature_extractor(x)
- x = self.feature_projection(x.transpose(1, 2))
- x, mask = self.mask(x)
- x = x + self.positional_embedding(x)
- x = self.dropout(self.norm(x))
- x = self.encoder(x, output_layer=layer)
- return x, mask
-
- def logits(self, x: torch.Tensor) -> torch.Tensor:
- logits = torch.cosine_similarity(
- x.unsqueeze(2),
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
- dim=-1,
- )
- return logits / 0.1
-
-
-class HubertSoft(Hubert):
- def __init__(self):
- super().__init__()
-
- def units(self, wav: torch.Tensor) -> torch.Tensor:
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
- x, _ = self.encode(wav)
- return self.proj(x)
-
- def forward(self, x):
- return self.units(x)
-
-class FeatureExtractor(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
- self.norm0 = nn.GroupNorm(512, 512)
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = t_func.gelu(self.norm0(self.conv0(x)))
- x = t_func.gelu(self.conv1(x))
- x = t_func.gelu(self.conv2(x))
- x = t_func.gelu(self.conv3(x))
- x = t_func.gelu(self.conv4(x))
- x = t_func.gelu(self.conv5(x))
- x = t_func.gelu(self.conv6(x))
- return x
-
-
-class FeatureProjection(nn.Module):
- def __init__(self):
- super().__init__()
- self.norm = nn.LayerNorm(512)
- self.projection = nn.Linear(512, 768)
- self.dropout = nn.Dropout(0.1)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.norm(x)
- x = self.projection(x)
- x = self.dropout(x)
- return x
-
-
-class PositionalConvEmbedding(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv = nn.Conv1d(
- 768,
- 768,
- kernel_size=128,
- padding=128 // 2,
- groups=16,
- )
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.conv(x.transpose(1, 2))
- x = t_func.gelu(x[:, :, :-1])
- return x.transpose(1, 2)
-
-
-class TransformerEncoder(nn.Module):
- def __init__(
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
- ) -> None:
- super(TransformerEncoder, self).__init__()
- self.layers = nn.ModuleList(
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
- )
- self.num_layers = num_layers
-
- def forward(
- self,
- src: torch.Tensor,
- mask: torch.Tensor = None,
- src_key_padding_mask: torch.Tensor = None,
- output_layer: Optional[int] = None,
- ) -> torch.Tensor:
- output = src
- for layer in self.layers[:output_layer]:
- output = layer(
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
- )
- return output
-
-
-def _compute_mask(
- shape: Tuple[int, int],
- mask_prob: float,
- mask_length: int,
- device: torch.device,
- min_masks: int = 0,
-) -> torch.Tensor:
- batch_size, sequence_length = shape
-
- if mask_length < 1:
- raise ValueError("`mask_length` has to be bigger than 0.")
-
- if mask_length > sequence_length:
- raise ValueError(
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
- )
-
- # compute number of masked spans in batch
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
- num_masked_spans = max(num_masked_spans, min_masks)
-
- # make sure num masked indices <= sequence_length
- if num_masked_spans * mask_length > sequence_length:
- num_masked_spans = sequence_length // mask_length
-
- # SpecAugment mask to fill
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
-
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
- uniform_dist = torch.ones(
- (batch_size, sequence_length - (mask_length - 1)), device=device
- )
-
- # get random indices to mask
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
-
- # expand masked indices to masked spans
- mask_indices = (
- mask_indices.unsqueeze(dim=-1)
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- offsets = (
- torch.arange(mask_length, device=device)[None, None, :]
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- mask_idxs = mask_indices + offsets
-
- # scatter indices to mask
- mask = mask.scatter(1, mask_idxs, True)
-
- return mask
-
-
-def hubert_soft(
- path: str,
-) -> HubertSoft:
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
- Args:
- path (str): path of a pretrained model
- """
- hubert = HubertSoft()
- checkpoint = torch.load(path)
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
- hubert.load_state_dict(checkpoint)
- hubert.eval()
- return hubert
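A brief sketch of loading the soft-unit extractor above; the checkpoint path is hypothetical, and the model expects mono 16 kHz audio shaped `(batch, 1, samples)`:

```python
import torch
from hubert.hubert_model_onnx import hubert_soft  # module shown above

model = hubert_soft("hubert-soft.pt")   # hypothetical checkpoint; loads weights and calls .eval()
wav = torch.zeros(1, 1, 16000)          # one second of silence, 16 kHz mono
with torch.no_grad():
    units = model(wav)                  # soft units, roughly (1, 50, 256) for 1 s of audio
print(units.shape)
```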
diff --git a/spaces/Mozira/voice-models/app.py b/spaces/Mozira/voice-models/app.py
deleted file mode 100644
index b9e789d017d492519a153f8d038c928b682b4483..0000000000000000000000000000000000000000
--- a/spaces/Mozira/voice-models/app.py
+++ /dev/null
@@ -1,187 +0,0 @@
-import os
-import json
-import argparse
-import traceback
-import logging
-import gradio as gr
-import numpy as np
-import librosa
-import torch
-import asyncio
-import edge_tts
-from datetime import datetime
-from fairseq import checkpoint_utils
-from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
-from vc_infer_pipeline import VC
-from config import (
- is_half,
- device
-)
-logging.getLogger("numba").setLevel(logging.WARNING)
-limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces
-
-def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index, file_big_npy):
- def vc_fn(
- input_audio,
- f0_up_key,
- f0_method,
- index_rate,
- tts_mode,
- tts_text,
- tts_voice
- ):
- try:
- if tts_mode:
- if len(tts_text) > 100 and limitation:
- return "Text terlalu panjang!", None
- if tts_text is None or tts_voice is None:
- return "Masukkan teks atau rekaman suara", None
- asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
- audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
- else:
- if args.files:
- audio, sr = librosa.load(input_audio, sr=16000, mono=True)
- else:
- if input_audio is None:
- return "Kamu perlu mengunggah Audio", None
- sampling_rate, audio = input_audio
- duration = audio.shape[0] / sampling_rate
- if duration > 20 and limitation:
- return "Unggah file audio yang berdurasi kurang dari 20 detik. Jika kamu perlu membuat file audio yang lebih panjang, gunakan Colab.", None
- audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
- if len(audio.shape) > 1:
- audio = librosa.to_mono(audio.transpose(1, 0))
- if sampling_rate != 16000:
- audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
- times = [0, 0, 0]
- f0_up_key = int(f0_up_key)
- audio_opt = vc.pipeline(
- hubert_model,
- net_g,
- 0,
- audio,
- times,
- f0_up_key,
- f0_method,
- file_index,
- file_big_npy,
- index_rate,
- if_f0,
- )
- print(
- f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
- )
- return "Berhasil", (tgt_sr, audio_opt)
- except:
- info = traceback.format_exc()
- print(info)
- return info, (None, None)
- return vc_fn
-
-def load_hubert():
- global hubert_model
- models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
- ["hubert_base.pt"],
- suffix="",
- )
- hubert_model = models[0]
- hubert_model = hubert_model.to(device)
- if is_half:
- hubert_model = hubert_model.half()
- else:
- hubert_model = hubert_model.float()
- hubert_model.eval()
-
-def change_to_tts_mode(tts_mode):
- if tts_mode:
- return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
- else:
- return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--api', action="store_true", default=False)
- parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
- parser.add_argument("--files", action="store_true", default=False, help="load audio from path")
- args, unknown = parser.parse_known_args()
- load_hubert()
- models = []
- tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
- voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
- with open("weights/model_info.json", "r", encoding="utf-8") as f:
- models_info = json.load(f)
- for name, info in models_info.items():
- if not info['enable']:
- continue
- title = info['title']
- author = info.get("author", None)
- cover = f"weights/{name}/{info['cover']}"
- index = f"weights/{name}/{info['feature_retrieval_library']}"
- npy = f"weights/{name}/{info['feature_file']}"
- cpt = torch.load(f"weights/{name}/{name}.pth", map_location="cpu")
- tgt_sr = cpt["config"][-1]
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
- if_f0 = cpt.get("f0", 1)
- if if_f0 == 1:
- net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
- else:
- net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
- del net_g.enc_q
-        print(net_g.load_state_dict(cpt["weight"], strict=False))  # do not add lines
- net_g.eval().to(device)
- if is_half:
- net_g = net_g.half()
- else:
- net_g = net_g.float()
- vc = VC(tgt_sr, device, is_half)
- models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index, npy)))
- with gr.Blocks() as app:
- gr.Markdown(
- "# Voice Model\n"
- "## Input audio harus bersih dan jelas tanpa noise.\n"
- "\n\n"
- "[](https://colab.research.google.com/drive/1IId-5xRfWj4t4chV1vD2nPqmoNeEJy-Y?usp=share_link)\n\n"
- "[](https://huggingface.co/spaces/Mozira/voice-models?duplicate=true)\n\n"
- "[](https://github.com/Sagramine/Training-AI-Voice)\n\n"
- )
- with gr.Tabs():
- for (name, title, author, cover, vc_fn) in models:
- with gr.TabItem(name):
- with gr.Row():
- gr.Markdown(
-                        ''
-                        f'{title}\n'+
-                        (f'Model author: {author}' if author else "")+
-                        (f'' if cover else "")+
-                        ''
- )
- with gr.Row():
- with gr.Column():
- if args.files:
- vc_input = gr.Textbox(label="Input audio path")
- else:
- vc_input = gr.Audio(label="Input audio"+' (kurang dari 20 detik)' if limitation else '')
- vc_transpose = gr.Number(label="Transpose", value=0)
- vc_f0method = gr.Radio(
- label="Algoritma ekstraksi pitch, PM lebih cepat tetapi Harvest lebih baik untuk frekuensi rendah",
- choices=["pm", "harvest"],
- value="pm",
- interactive=True,
- )
- vc_index_ratio = gr.Slider(
- minimum=0,
- maximum=1,
- label="Retrieval feature ratio",
- value=0.6,
- interactive=True,
- )
- tts_mode = gr.Checkbox(label="tts (gunakan input text-edge sebagai suara)", value=False)
- tts_text = gr.Textbox(visible=False,label="TTS text (100 words limitation)" if limitation else "TTS text")
- tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
- vc_submit = gr.Button("Generate", variant="primary")
- with gr.Column():
- vc_output1 = gr.Textbox(label="Output Message")
- vc_output2 = gr.Audio(label="Output Audio")
- vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2])
- tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice])
- app.queue(concurrency_count=1, max_size=20, api_open=args.api).launch(share=args.share)
\ No newline at end of file
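Standalone sketch of the audio pre-processing done inside `vc_fn` above (integer PCM scaled to float32, downmix to mono, resample to 16 kHz), using synthetic int16 data in place of gradio's `(sample_rate, ndarray)` output:

```python
import numpy as np
import librosa

sampling_rate = 44100
# Synthetic int16 stereo clip standing in for the gr.Audio return value.
audio = (np.random.randn(sampling_rate, 2) * 3000).astype(np.int16)

audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)  # scale to [-1, 1]
if len(audio.shape) > 1:
    audio = librosa.to_mono(audio.transpose(1, 0))              # (channels, samples) for to_mono
if sampling_rate != 16000:
    audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
print(audio.dtype, audio.shape)  # float32, ~16000 samples
```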
diff --git a/spaces/MrVicente/RA-BART/kgs_binding/kg_qa_binding_utils.py b/spaces/MrVicente/RA-BART/kgs_binding/kg_qa_binding_utils.py
deleted file mode 100644
index de33fea8d4a1390a33a79a23cef17c17b9c8118f..0000000000000000000000000000000000000000
--- a/spaces/MrVicente/RA-BART/kgs_binding/kg_qa_binding_utils.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#############################
-# Imports
-#############################
-
-# Python modules
-from typing import List, Tuple
-from enum import Enum
-
-# Remote modules
-
-# Local modules
-from .kg_base_wrapper import KGBaseHandler
-from .swow_handler import SwowHandler
-from .conceptnet_handler import ConceptNetHandler
-from utils import read_json_file_2_dict, Data_Type
-
-#############################
-# Constants
-#############################
-
-#############################
-# Stuff
-#############################
-
-class KGType(Enum):
- SWOW = 'swow'
- CSKG = 'cskg'
- CONCEPTNET = 'conceptnet'
-
-def load_kg_handler(kg_type: KGType):
- if kg_type.value == KGType.SWOW.value:
- return SwowHandler()
- elif kg_type.value == KGType.CONCEPTNET.value:
- return ConceptNetHandler()
- else:
- raise NotImplementedError()
-
-def _load_data_paths_metadata():
- try:
- data = read_json_file_2_dict('data_config.json', store_dir='run_config')
- except:
- data = None
- return data
-
-def from_relations_path_2_relations(dataset_types: List[Data_Type], metadata):
- relations = []
- print('metadata:', metadata)
- for dataset_type in dataset_types:
- qa_meta_data = metadata[dataset_type.value]
- filename_path, dir_data = qa_meta_data['local']
-        print(filename_path, dir_data)
- data = read_json_file_2_dict(filename_path, dir_data)
- relations.extend(data)
- return relations
-
-def KGHandler_to_str(kg_handler: KGBaseHandler) -> str:
- if isinstance(kg_handler, SwowHandler):
- return 'swow'
- elif isinstance(kg_handler, ConceptNetHandler):
- return 'conceptnet'
- else:
- raise NotImplementedError()
-
-def get_kg_qa_data_metadata(kg_handler: KGBaseHandler) -> Tuple[str, str]:
- kg_qa_data_path = _load_data_paths_metadata()
- if isinstance(kg_handler, SwowHandler):
- swow = kg_qa_data_path["swow"]
- return swow
- elif isinstance(kg_handler, ConceptNetHandler):
- conceptnet = kg_qa_data_path["conceptnet"]
- return conceptnet
- else:
- raise NotImplementedError()
\ No newline at end of file
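The two dispatch helpers above follow a simple enum/isinstance pattern; a self-contained sketch with stub handlers (the stubs are illustrative and not the Space's real SwowHandler/ConceptNetHandler):

```python
from enum import Enum

class KGType(Enum):
    SWOW = 'swow'
    CONCEPTNET = 'conceptnet'

class SwowHandler:          # stub standing in for the real wrapper
    pass

class ConceptNetHandler:    # stub standing in for the real wrapper
    pass

def load_kg_handler(kg_type: KGType):
    if kg_type.value == KGType.SWOW.value:
        return SwowHandler()
    elif kg_type.value == KGType.CONCEPTNET.value:
        return ConceptNetHandler()
    raise NotImplementedError()

def kg_handler_to_str(kg_handler) -> str:
    if isinstance(kg_handler, SwowHandler):
        return 'swow'
    if isinstance(kg_handler, ConceptNetHandler):
        return 'conceptnet'
    raise NotImplementedError()

handler = load_kg_handler(KGType.CONCEPTNET)
print(kg_handler_to_str(handler))  # conceptnet
```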
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/tf1_checkpoint_converter_lib.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/tf1_checkpoint_converter_lib.py
deleted file mode 100644
index 122e455210ae70cd9af04912b95a600a3d23d09a..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/tf1_checkpoint_converter_lib.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-r"""Convert checkpoints created by Estimator (tf1) to be Keras compatible."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import tensorflow.compat.v1 as tf # TF 1.x
-
-# Mapping between old <=> new names. The source pattern in original variable
-# name will be replaced by destination pattern.
-BERT_NAME_REPLACEMENTS = (
- ("bert", "bert_model"),
- ("embeddings/word_embeddings", "word_embeddings/embeddings"),
- ("embeddings/token_type_embeddings",
- "embedding_postprocessor/type_embeddings"),
- ("embeddings/position_embeddings",
- "embedding_postprocessor/position_embeddings"),
- ("embeddings/LayerNorm", "embedding_postprocessor/layer_norm"),
- ("attention/self", "self_attention"),
- ("attention/output/dense", "self_attention_output"),
- ("attention/output/LayerNorm", "self_attention_layer_norm"),
- ("intermediate/dense", "intermediate"),
- ("output/dense", "output"),
- ("output/LayerNorm", "output_layer_norm"),
- ("pooler/dense", "pooler_transform"),
-)
-
-BERT_V2_NAME_REPLACEMENTS = (
- ("bert/", ""),
- ("encoder", "transformer"),
- ("embeddings/word_embeddings", "word_embeddings/embeddings"),
- ("embeddings/token_type_embeddings", "type_embeddings/embeddings"),
- ("embeddings/position_embeddings", "position_embedding/embeddings"),
- ("embeddings/LayerNorm", "embeddings/layer_norm"),
- ("attention/self", "self_attention"),
- ("attention/output/dense", "self_attention/attention_output"),
- ("attention/output/LayerNorm", "self_attention_layer_norm"),
- ("intermediate/dense", "intermediate"),
- ("output/dense", "output"),
- ("output/LayerNorm", "output_layer_norm"),
- ("pooler/dense", "pooler_transform"),
- ("cls/predictions/output_bias", "cls/predictions/output_bias/bias"),
- ("cls/seq_relationship/output_bias", "predictions/transform/logits/bias"),
- ("cls/seq_relationship/output_weights",
- "predictions/transform/logits/kernel"),
-)
-
-BERT_PERMUTATIONS = ()
-
-BERT_V2_PERMUTATIONS = (("cls/seq_relationship/output_weights", (1, 0)),)
-
-
-def _bert_name_replacement(var_name, name_replacements):
- """Gets the variable name replacement."""
- for src_pattern, tgt_pattern in name_replacements:
- if src_pattern in var_name:
- old_var_name = var_name
- var_name = var_name.replace(src_pattern, tgt_pattern)
- tf.logging.info("Converted: %s --> %s", old_var_name, var_name)
- return var_name
-
-
-def _has_exclude_patterns(name, exclude_patterns):
- """Checks if a string contains substrings that match patterns to exclude."""
- for p in exclude_patterns:
- if p in name:
- return True
- return False
-
-
-def _get_permutation(name, permutations):
- """Checks whether a variable requires transposition by pattern matching."""
- for src_pattern, permutation in permutations:
- if src_pattern in name:
- tf.logging.info("Permuted: %s --> %s", name, permutation)
- return permutation
-
- return None
-
-
-def _get_new_shape(name, shape, num_heads):
- """Checks whether a variable requires reshape by pattern matching."""
- if "self_attention/attention_output/kernel" in name:
- return tuple([num_heads, shape[0] // num_heads, shape[1]])
- if "self_attention/attention_output/bias" in name:
- return shape
-
- patterns = [
- "self_attention/query", "self_attention/value", "self_attention/key"
- ]
- for pattern in patterns:
- if pattern in name:
- if "kernel" in name:
- return tuple([shape[0], num_heads, shape[1] // num_heads])
- if "bias" in name:
- return tuple([num_heads, shape[0] // num_heads])
- return None
-
-
-def create_v2_checkpoint(model, src_checkpoint, output_path):
- """Converts a name-based matched TF V1 checkpoint to TF V2 checkpoint."""
- # Uses streaming-restore in eager model to read V1 name-based checkpoints.
- model.load_weights(src_checkpoint).assert_existing_objects_matched()
- checkpoint = tf.train.Checkpoint(model=model)
- checkpoint.save(output_path)
-
-
-def convert(checkpoint_from_path,
- checkpoint_to_path,
- num_heads,
- name_replacements,
- permutations,
- exclude_patterns=None):
- """Migrates the names of variables within a checkpoint.
-
- Args:
- checkpoint_from_path: Path to source checkpoint to be read in.
- checkpoint_to_path: Path to checkpoint to be written out.
- num_heads: The number of heads of the model.
- name_replacements: A list of tuples of the form (match_str, replace_str)
- describing variable names to adjust.
- permutations: A list of tuples of the form (match_str, permutation)
- describing permutations to apply to given variables. Note that match_str
- should match the original variable name, not the replaced one.
- exclude_patterns: A list of string patterns to exclude variables from
- checkpoint conversion.
-
- Returns:
- A dictionary that maps the new variable names to the Variable objects.
- A dictionary that maps the old variable names to the new variable names.
- """
- with tf.Graph().as_default():
- tf.logging.info("Reading checkpoint_from_path %s", checkpoint_from_path)
- reader = tf.train.NewCheckpointReader(checkpoint_from_path)
- name_shape_map = reader.get_variable_to_shape_map()
- new_variable_map = {}
- conversion_map = {}
- for var_name in name_shape_map:
- if exclude_patterns and _has_exclude_patterns(var_name, exclude_patterns):
- continue
- # Get the original tensor data.
- tensor = reader.get_tensor(var_name)
-
- # Look up the new variable name, if any.
- new_var_name = _bert_name_replacement(var_name, name_replacements)
-
- # See if we need to reshape the underlying tensor.
- new_shape = None
- if num_heads > 0:
- new_shape = _get_new_shape(new_var_name, tensor.shape, num_heads)
- if new_shape:
-          tf.logging.info("Variable %s has a shape change from %s to %s",
-                          var_name, tensor.shape, new_shape)
- tensor = np.reshape(tensor, new_shape)
-
- # See if we need to permute the underlying tensor.
- permutation = _get_permutation(var_name, permutations)
- if permutation:
- tensor = np.transpose(tensor, permutation)
-
- # Create a new variable with the possibly-reshaped or transposed tensor.
- var = tf.Variable(tensor, name=var_name)
-
- # Save the variable into the new variable map.
- new_variable_map[new_var_name] = var
-
- # Keep a list of converter variables for sanity checking.
- if new_var_name != var_name:
- conversion_map[var_name] = new_var_name
-
- saver = tf.train.Saver(new_variable_map)
-
- with tf.Session() as sess:
- sess.run(tf.global_variables_initializer())
- tf.logging.info("Writing checkpoint_to_path %s", checkpoint_to_path)
- saver.save(sess, checkpoint_to_path, write_meta_graph=False)
-
- tf.logging.info("Summary:")
- tf.logging.info(" Converted %d variable name(s).", len(new_variable_map))
- tf.logging.info(" Converted: %s", str(conversion_map))
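The conversion above is driven by plain substring rules plus an optional reshape; a TensorFlow-free sketch replaying those two steps on a toy attention-output kernel (only a subset of `BERT_V2_NAME_REPLACEMENTS` is reproduced here):

```python
import numpy as np

name_replacements = (
    ("bert/", ""),
    ("encoder", "transformer"),
    ("attention/output/dense", "self_attention/attention_output"),
)

def rename(var_name):
    # Apply every matching (src, tgt) substitution, as _bert_name_replacement does.
    for src, tgt in name_replacements:
        if src in var_name:
            var_name = var_name.replace(src, tgt)
    return var_name

num_heads = 12
old_name = "bert/encoder/layer_0/attention/output/dense/kernel"
new_name = rename(old_name)
print(new_name)  # transformer/layer_0/self_attention/attention_output/kernel

# The matching kernel is reshaped from (hidden, hidden) to (heads, size_per_head, hidden).
kernel = np.zeros((768, 768), dtype=np.float32)
if "self_attention/attention_output/kernel" in new_name:
    kernel = kernel.reshape(num_heads, 768 // num_heads, 768)
print(kernel.shape)  # (12, 64, 768)
```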
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/xlnet/preprocess_pretrain_data.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/xlnet/preprocess_pretrain_data.py
deleted file mode 100644
index 9bf5367611ca656e88c969e4711334911e9cedd0..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/nlp/xlnet/preprocess_pretrain_data.py
+++ /dev/null
@@ -1,998 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Script to pre-process pre-training data into tfrecords."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import json
-import os
-import random
-
-from absl import app
-from absl import flags
-import absl.logging as _logging # pylint: disable=unused-import
-
-import numpy as np
-
-
-import tensorflow.google as tf
-from official.nlp.xlnet import preprocess_utils
-import sentencepiece as spm
-
-
-special_symbols = {
-    "<unk>"  : 0,
-    "<s>"    : 1,
-    "</s>"   : 2,
-    "<cls>"  : 3,
-    "<sep>"  : 4,
-    "<pad>"  : 5,
-    "<mask>" : 6,
-    "<eod>"  : 7,
-    "<eop>"  : 8,
-}
-
-VOCAB_SIZE = 32000
-UNK_ID = special_symbols["<unk>"]
-CLS_ID = special_symbols["<cls>"]
-SEP_ID = special_symbols["<sep>"]
-MASK_ID = special_symbols["<mask>"]
-EOD_ID = special_symbols["<eod>"]
-
-
-def _int64_feature(values):
- return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
-
-
-def _float_feature(values):
- return tf.train.Feature(float_list=tf.train.FloatList(value=values))
-
-
-def format_filename(prefix, bsz_per_host, seq_len, bi_data, suffix,
- mask_alpha=5, mask_beta=1, reuse_len=None, uncased=False,
- fixed_num_predict=None):
- """docs."""
- if reuse_len is None:
- reuse_len_str = ""
- else:
- reuse_len_str = "reuse-{}.".format(reuse_len)
- if not uncased:
- uncased_str = ""
- else:
- uncased_str = "uncased."
- if bi_data:
- bi_data_str = "bi"
- else:
- bi_data_str = "uni"
- if fixed_num_predict is not None:
- fnp_str = "fnp-{}.".format(fixed_num_predict)
- else:
- fnp_str = ""
-
- file_name = "{}.bsz-{}.seqlen-{}.{}{}{}.alpha-{}.beta-{}.{}{}".format(
- prefix, bsz_per_host, seq_len, reuse_len_str, uncased_str, bi_data_str,
- mask_alpha, mask_beta, fnp_str, suffix)
-
- return file_name
-
-
-def _create_data(idx, input_paths):
- # Load sentence-piece model
- sp = spm.SentencePieceProcessor()
- sp.Load(FLAGS.sp_path)
-
- input_shards = []
- total_line_cnt = 0
- for input_path in input_paths:
- input_data, sent_ids = [], []
- sent_id, line_cnt = True, 0
- tf.logging.info("Processing %s", input_path)
- for line in tf.gfile.Open(input_path):
- if line_cnt % 100000 == 0:
- tf.logging.info("Loading line %d", line_cnt)
- line_cnt += 1
-
- if not line.strip():
- if FLAGS.use_eod:
- sent_id = not sent_id
- cur_sent = [EOD_ID]
- else:
- continue
- else:
- if FLAGS.from_raw_text:
- cur_sent = preprocess_utils.preprocess_text(
- line.strip(), lower=FLAGS.uncased)
- cur_sent = preprocess_utils.encode_ids(sp, cur_sent)
- else:
- cur_sent = list(map(int, line.strip().split()))
-
- input_data.extend(cur_sent)
- sent_ids.extend([sent_id] * len(cur_sent))
- sent_id = not sent_id
-
- tf.logging.info("Finish with line %d", line_cnt)
- if line_cnt == 0:
- continue
-
- input_data = np.array(input_data, dtype=np.int64)
- sent_ids = np.array(sent_ids, dtype=np.bool)
-
- total_line_cnt += line_cnt
- input_shards.append((input_data, sent_ids))
-
-  tf.logging.info("[Task %d] Total number of lines: %d", idx, total_line_cnt)
-
- tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords")
-
- filenames, num_batch = [], 0
-
- # Randomly shuffle input shards (with a fixed but distinct random seed)
- np.random.seed(100 * FLAGS.task + FLAGS.pass_id)
-
- perm_indices = np.random.permutation(len(input_shards))
- tf.logging.info("Using perm indices %s for pass %d",
- perm_indices.tolist(), FLAGS.pass_id)
-
- input_data_list, sent_ids_list = [], []
- prev_sent_id = None
- for perm_idx in perm_indices:
- input_data, sent_ids = input_shards[perm_idx]
-    # make sure that `sent_ids[0] == not prev_sent_id`
- if prev_sent_id is not None and sent_ids[0] == prev_sent_id:
- sent_ids = np.logical_not(sent_ids)
-
- # append to temporary list
- input_data_list.append(input_data)
- sent_ids_list.append(sent_ids)
-
- # update `prev_sent_id`
- prev_sent_id = sent_ids[-1]
-
- input_data = np.concatenate(input_data_list)
- sent_ids = np.concatenate(sent_ids_list)
-
- file_name, cur_num_batch = create_tfrecords(
- save_dir=tfrecord_dir,
- basename="{}-{}-{}".format(FLAGS.split, idx, FLAGS.pass_id),
- data=[input_data, sent_ids],
- bsz_per_host=FLAGS.bsz_per_host,
- seq_len=FLAGS.seq_len,
- bi_data=FLAGS.bi_data,
- sp=sp,
- )
-
- filenames.append(file_name)
- num_batch += cur_num_batch
-
- record_info = {
- "filenames": filenames,
- "num_batch": num_batch
- }
-
- return record_info
-
-
-def create_data(_):
- # Validate FLAGS
- assert FLAGS.bsz_per_host % FLAGS.num_core_per_host == 0
- if not FLAGS.use_tpu:
- FLAGS.num_core_per_host = 1 # forced to be one
-
- # Make workdirs
- if not tf.gfile.Exists(FLAGS.save_dir):
- tf.gfile.MakeDirs(FLAGS.save_dir)
-
- tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords")
- if not tf.gfile.Exists(tfrecord_dir):
- tf.gfile.MakeDirs(tfrecord_dir)
-
- # Create and dump corpus_info from task 0
- if FLAGS.task == 0 and FLAGS.pass_id == 0:
- corpus_info = {
- "vocab_size": VOCAB_SIZE,
- "bsz_per_host": FLAGS.bsz_per_host,
- "num_core_per_host": FLAGS.num_core_per_host,
- "seq_len": FLAGS.seq_len,
- "reuse_len": FLAGS.reuse_len,
- "uncased": FLAGS.uncased,
- "bi_data": FLAGS.bi_data,
- "mask_alpha": FLAGS.mask_alpha,
- "mask_beta": FLAGS.mask_beta,
- "num_predict": FLAGS.num_predict,
- "use_eod": FLAGS.use_eod,
- "sp_path": FLAGS.sp_path,
- "input_glob": FLAGS.input_glob,
- }
- corpus_info_path = os.path.join(FLAGS.save_dir, "corpus_info.json")
- with tf.gfile.Open(corpus_info_path, "w") as fp:
- json.dump(corpus_info, fp)
-
- # Interleavely split the work into FLAGS.num_task splits
- file_paths = sorted(tf.gfile.Glob(FLAGS.input_glob))
- tf.logging.info("Use glob: %s", FLAGS.input_glob)
- tf.logging.info("Find %d files: %s", len(file_paths), file_paths)
-
- task_file_paths = file_paths[FLAGS.task::FLAGS.num_task]
- if not task_file_paths:
- tf.logging.info("Exit: task %d has no file to process.", FLAGS.task)
- return
-
- tf.logging.info("Task %d process %d files: %s",
- FLAGS.task, len(task_file_paths), task_file_paths)
- record_info = _create_data(FLAGS.task, task_file_paths)
-
- record_prefix = "record_info-{}-{}-{}".format(
- FLAGS.split, FLAGS.task, FLAGS.pass_id)
- record_name = format_filename(
- prefix=record_prefix,
- bsz_per_host=FLAGS.bsz_per_host,
- seq_len=FLAGS.seq_len,
- mask_alpha=FLAGS.mask_alpha,
- mask_beta=FLAGS.mask_beta,
- reuse_len=FLAGS.reuse_len,
- bi_data=FLAGS.bi_data,
- suffix="json",
- uncased=FLAGS.uncased,
- fixed_num_predict=FLAGS.num_predict)
- record_info_path = os.path.join(tfrecord_dir, record_name)
-
- with tf.gfile.Open(record_info_path, "w") as fp:
- json.dump(record_info, fp)
-
-
-def batchify(data, bsz_per_host, sent_ids=None):
- num_step = len(data) // bsz_per_host
- data = data[:bsz_per_host * num_step]
- data = data.reshape(bsz_per_host, num_step)
- if sent_ids is not None:
- sent_ids = sent_ids[:bsz_per_host * num_step]
- sent_ids = sent_ids.reshape(bsz_per_host, num_step)
-
- if sent_ids is not None:
- return data, sent_ids
- return data
-
-
-def _split_a_and_b(data, sent_ids, begin_idx, tot_len, extend_target=False):
- """Split two segments from `data` starting from the index `begin_idx`."""
-
- data_len = data.shape[0]
- if begin_idx + tot_len >= data_len:
- tf.logging.info("[_split_a_and_b] returns None: "
- "begin_idx %d + tot_len %d >= data_len %d",
- begin_idx, tot_len, data_len)
- return None
-
- end_idx = begin_idx + 1
- cut_points = []
- while end_idx < data_len:
- if sent_ids[end_idx] != sent_ids[end_idx - 1]:
- if end_idx - begin_idx >= tot_len: break
- cut_points.append(end_idx)
- end_idx += 1
-
- a_begin = begin_idx
- if len(cut_points) == 0 or random.random() < 0.5:
- label = 0
- if len(cut_points) == 0:
- a_end = end_idx
- else:
- a_end = random.choice(cut_points)
-
- b_len = max(1, tot_len - (a_end - a_begin))
- # (zihangd): `data_len - 1` to account for extend_target
- b_begin = random.randint(0, data_len - 1 - b_len)
- b_end = b_begin + b_len
- while b_begin > 0 and sent_ids[b_begin - 1] == sent_ids[b_begin]:
- b_begin -= 1
- # (zihangd): `data_len - 1` to account for extend_target
- while b_end < data_len - 1 and sent_ids[b_end - 1] == sent_ids[b_end]:
- b_end += 1
-
- new_begin = a_end
- else:
- label = 1
- a_end = random.choice(cut_points)
- b_begin = a_end
- b_end = end_idx
-
- new_begin = b_end
-
- while a_end - a_begin + b_end - b_begin > tot_len:
- if a_end - a_begin > b_end - b_begin:
- # delete the right side only for the LM objective
- a_end -= 1
- else:
- b_end -= 1
-
- ret = [data[a_begin: a_end], data[b_begin: b_end], label, new_begin]
-
- if extend_target:
- if a_end >= data_len or b_end >= data_len:
- tf.logging.info("[_split_a_and_b] returns None: "
- "a_end %d or b_end %d >= data_len %d",
- a_end, b_end, data_len)
- return None
- a_target = data[a_begin + 1: a_end + 1]
- b_target = data[b_begin: b_end + 1]
- ret.extend([a_target, b_target])
-
- return ret
-
-
-def _is_start_piece(piece):
- special_pieces = set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~'))
- if (piece.startswith("▁") or piece.startswith("<")
- or piece in special_pieces):
- return True
- else:
- return False
-
-
-def _sample_mask(sp, seg, reverse=False, max_gram=5, goal_num_predict=None):
- """Sample `goal_num_predict` tokens for partial prediction.
- About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens."""
-
- seg_len = len(seg)
- mask = np.array([False] * seg_len, dtype=np.bool)
-
- num_predict = 0
-
- ngrams = np.arange(1, max_gram + 1, dtype=np.int64)
- pvals = 1. / np.arange(1, max_gram + 1)
- pvals /= pvals.sum(keepdims=True)
-
- if reverse:
- seg = np.flip(seg, 0)
-
- cur_len = 0
- while cur_len < seg_len:
- if goal_num_predict is not None and num_predict >= goal_num_predict: break
-
- n = np.random.choice(ngrams, p=pvals)
- if goal_num_predict is not None:
- n = min(n, goal_num_predict - num_predict)
- ctx_size = (n * FLAGS.mask_alpha) // FLAGS.mask_beta
- l_ctx = np.random.choice(ctx_size)
- r_ctx = ctx_size - l_ctx
-
- # Find the start position of a complete token
- beg = cur_len + l_ctx
- while beg < seg_len and not _is_start_piece(sp.IdToPiece(seg[beg].item())):
- beg += 1
- if beg >= seg_len:
- break
-
- # Find the end position of the n-gram (start pos of the n+1-th gram)
- end = beg + 1
- cnt_ngram = 1
- while end < seg_len:
- cnt_ngram += 1
- if cnt_ngram > n:
- break
- end += 1
- if end >= seg_len:
- break
-
- # Update
- mask[beg:end] = True
- num_predict += end - beg
-
- cur_len = end + r_ctx
-
- while goal_num_predict is not None and num_predict < goal_num_predict:
- i = np.random.randint(seg_len)
- if not mask[i]:
- mask[i] = True
- num_predict += 1
-
- if reverse:
- mask = np.flip(mask, 0)
-
- return mask
-
-
-def _sample_mask_ngram(sp, seg, reverse=False, max_gram=5,
- goal_num_predict=None):
- """Sample `goal_num_predict` tokens for partial prediction.
- About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens."""
-
- seg_len = len(seg)
- mask = np.array([False] * seg_len, dtype=np.bool)
-
- num_predict = 0
-
- ngrams = np.arange(1, max_gram + 1, dtype=np.int64)
- pvals = 1. / np.arange(1, max_gram + 1)
- pvals /= pvals.sum(keepdims=True)
-
- if reverse:
- seg = np.flip(seg, 0)
-
- cur_len = 0
- while cur_len < seg_len:
- if goal_num_predict is not None and num_predict >= goal_num_predict: break
-
- n = np.random.choice(ngrams, p=pvals)
- if goal_num_predict is not None:
- n = min(n, goal_num_predict - num_predict)
- ctx_size = (n * FLAGS.mask_alpha) // FLAGS.mask_beta
- l_ctx = np.random.choice(ctx_size)
- r_ctx = ctx_size - l_ctx
-
- # Find the start position of a complete token
- beg = cur_len + l_ctx
- while beg < seg_len and not _is_start_piece(sp.IdToPiece(seg[beg].item())):
- beg += 1
- if beg >= seg_len:
- break
-
- # Find the end position of the n-gram (start pos of the n+1-th gram)
- end = beg
- cnt_ngram = 0
- while end < seg_len:
- if _is_start_piece(sp.IdToPiece(seg[end].item())):
- cnt_ngram += 1
- if cnt_ngram > n:
- break
-
- # select current piece
- mask[end] = True
-
- # update the end pointer and increment num_predict
- end += 1
- num_predict += 1
-
- if goal_num_predict is not None and num_predict >= goal_num_predict:
- break
-
- cur_len = end + r_ctx
-
- while goal_num_predict is not None and num_predict < goal_num_predict:
- i = np.random.randint(seg_len)
- if not mask[i]:
- mask[i] = True
- num_predict += 1
-
- if reverse:
- mask = np.flip(mask, 0)
-
- return mask
-
-
-def create_tfrecords(save_dir, basename, data, bsz_per_host, seq_len,
- bi_data, sp):
- data, sent_ids = data[0], data[1]
-
- num_core = FLAGS.num_core_per_host
- bsz_per_core = bsz_per_host // num_core
-
- if bi_data:
- assert bsz_per_host % (2 * FLAGS.num_core_per_host) == 0
- fwd_data, fwd_sent_ids = batchify(data, bsz_per_host // 2, sent_ids)
-
- fwd_data = fwd_data.reshape(num_core, 1, bsz_per_core // 2, -1)
- fwd_sent_ids = fwd_sent_ids.reshape(num_core, 1, bsz_per_core // 2, -1)
-
- bwd_data = fwd_data[:, :, :, ::-1]
- bwd_sent_ids = fwd_sent_ids[:, :, :, ::-1]
-
- data = np.concatenate(
- [fwd_data, bwd_data], 1).reshape(bsz_per_host, -1)
- sent_ids = np.concatenate(
- [fwd_sent_ids, bwd_sent_ids], 1).reshape(bsz_per_host, -1)
- else:
- data, sent_ids = batchify(data, bsz_per_host, sent_ids)
-
- tf.logging.info("Raw data shape %s.", data.shape)
-
- file_name = format_filename(
- prefix=basename,
- bsz_per_host=bsz_per_host,
- seq_len=seq_len,
- bi_data=bi_data,
- suffix="tfrecords",
- mask_alpha=FLAGS.mask_alpha,
- mask_beta=FLAGS.mask_beta,
- reuse_len=FLAGS.reuse_len,
- uncased=FLAGS.uncased,
- fixed_num_predict=FLAGS.num_predict
- )
- save_path = os.path.join(save_dir, file_name)
- record_writer = tf.python_io.TFRecordWriter(save_path)
- tf.logging.info("Start writing %s.", save_path)
-
- num_batch = 0
- reuse_len = FLAGS.reuse_len
-
- # [sep] x 2 + [cls]
- assert reuse_len < seq_len - 3
-
- data_len = data.shape[1]
- sep_array = np.array([SEP_ID], dtype=np.int64)
- cls_array = np.array([CLS_ID], dtype=np.int64)
-
- i = 0
- while i + seq_len <= data_len:
- if num_batch % 500 == 0:
- tf.logging.info("Processing batch %d", num_batch)
-
- all_ok = True
- features = []
- for idx in range(bsz_per_host):
- inp = data[idx, i: i + reuse_len]
- tgt = data[idx, i + 1: i + reuse_len + 1]
-
- results = _split_a_and_b(
- data[idx],
- sent_ids[idx],
- begin_idx=i + reuse_len,
- tot_len=seq_len - reuse_len - 3,
- extend_target=True)
- if results is None:
- tf.logging.info("Break out with seq idx %d", i)
- all_ok = False
- break
-
- # unpack the results
- (a_data, b_data, label, _, a_target, b_target) = tuple(results)
-
- # sample ngram spans to predict
- reverse = bi_data and (idx // (bsz_per_core // 2)) % 2 == 1
- if FLAGS.num_predict is None:
- num_predict_0 = num_predict_1 = None
- else:
- num_predict_1 = FLAGS.num_predict // 2
- num_predict_0 = FLAGS.num_predict - num_predict_1
- mask_0 = _sample_mask(sp, inp, reverse=reverse,
- goal_num_predict=num_predict_0)
- mask_1 = _sample_mask(sp, np.concatenate([a_data, sep_array, b_data,
- sep_array, cls_array]),
- reverse=reverse, goal_num_predict=num_predict_1)
-
- # concatenate data
- cat_data = np.concatenate([inp, a_data, sep_array, b_data,
- sep_array, cls_array])
- seg_id = ([0] * (reuse_len + a_data.shape[0]) + [0] +
- [1] * b_data.shape[0] + [1] + [2])
- assert cat_data.shape[0] == seq_len
- assert mask_0.shape[0] == seq_len // 2
- assert mask_1.shape[0] == seq_len // 2
-
- # the last two CLS's are not used, just for padding purposes
- tgt = np.concatenate([tgt, a_target, b_target, cls_array, cls_array])
- assert tgt.shape[0] == seq_len
-
- is_masked = np.concatenate([mask_0, mask_1], 0)
- if FLAGS.num_predict is not None:
- assert np.sum(is_masked) == FLAGS.num_predict
-
- feature = {
- "input": _int64_feature(cat_data),
- "is_masked": _int64_feature(is_masked),
- "target": _int64_feature(tgt),
- "seg_id": _int64_feature(seg_id),
- "label": _int64_feature([label]),
- }
- features.append(feature)
-
- if all_ok:
- assert len(features) == bsz_per_host
- for feature in features:
- example = tf.train.Example(features=tf.train.Features(feature=feature))
- record_writer.write(example.SerializeToString())
- num_batch += 1
- else:
- break
-
- i += reuse_len
-
- record_writer.close()
- tf.logging.info("Done writing %s. Num of batches: %d", save_path, num_batch)
-
- return save_path, num_batch
-
-
-################
-# get_input_fn #
-################
-def _convert_example(example, use_bfloat16):
- """Cast int64 into int32 and float32 to bfloat16 if use_bfloat16."""
- for key in list(example.keys()):
- val = example[key]
- if tf.keras.backend.is_sparse(val):
- val = tf.sparse.to_dense(val)
- if val.dtype == tf.int64:
- val = tf.cast(val, tf.int32)
- if use_bfloat16 and val.dtype == tf.float32:
- val = tf.cast(val, tf.bfloat16)
-
- example[key] = val
-
-
-def parse_files_to_dataset(parser, file_names, split, num_batch, num_hosts,
- host_id, num_core_per_host, bsz_per_core):
- # list of file pathes
- num_files = len(file_names)
- num_files_per_host = num_files // num_hosts
- my_start_file_id = host_id * num_files_per_host
- my_end_file_id = (host_id + 1) * num_files_per_host
- if host_id == num_hosts - 1:
- my_end_file_id = num_files
- file_paths = file_names[my_start_file_id: my_end_file_id]
- tf.logging.info("Host %d handles %d files", host_id, len(file_paths))
-
- assert split == "train"
- dataset = tf.data.Dataset.from_tensor_slices(file_paths)
-
- # file-level shuffle
- if len(file_paths) > 1:
- dataset = dataset.shuffle(len(file_paths))
-
- # Note: we cannot perform sample-level shuffle here because this will violate
- # the consecutive requirement of data stream.
- dataset = tf.data.TFRecordDataset(dataset)
-
-  # Note: since we are doing online preprocessing, the parsed result of the
-  # same input will be different each time. Thus, caching processed data is
-  # not helpful. It would use a lot of memory and lead to container OOM.
-  # So, we cache the non-parsed raw data instead.
- dataset = dataset.cache().map(parser).repeat()
- dataset = dataset.batch(bsz_per_core, drop_remainder=True)
- dataset = dataset.prefetch(num_core_per_host * bsz_per_core)
-
- return dataset
-
-
-def _local_perm(inputs, targets, is_masked, perm_size, seq_len):
- """
- Sample a permutation of the factorization order, and create an
- attention mask accordingly.
-
- Args:
- inputs: int64 Tensor in shape [seq_len], input ids.
- targets: int64 Tensor in shape [seq_len], target ids.
- is_masked: bool Tensor in shape [seq_len]. True means being selected
- for partial prediction.
- perm_size: the length of longest permutation. Could be set to be reuse_len.
- Should not be larger than reuse_len or there will be data leaks.
- seq_len: int, sequence length.
- """
-
- # Generate permutation indices
- index = tf.range(seq_len, dtype=tf.int64)
- index = tf.transpose(tf.reshape(index, [-1, perm_size]))
- index = tf.random_shuffle(index)
- index = tf.reshape(tf.transpose(index), [-1])
-
- # `perm_mask` and `target_mask`
- # non-functional tokens
- non_func_tokens = tf.logical_not(tf.logical_or(
- tf.equal(inputs, SEP_ID),
- tf.equal(inputs, CLS_ID)))
-
- non_mask_tokens = tf.logical_and(tf.logical_not(is_masked), non_func_tokens)
- masked_or_func_tokens = tf.logical_not(non_mask_tokens)
-
-  # Set the permutation indices of non-masked (& non-functional) tokens to the
- # smallest index (-1):
- # (1) they can be seen by all other positions
-  # (2) they cannot see masked positions, so there won't be an information leak
- smallest_index = -tf.ones([seq_len], dtype=tf.int64)
- rev_index = tf.where(non_mask_tokens, smallest_index, index)
-
-  # Create `target_mask`: non-functional and masked tokens
- # 1: use mask as input and have loss
- # 0: use token (or [SEP], [CLS]) as input and do not have loss
- target_tokens = tf.logical_and(masked_or_func_tokens, non_func_tokens)
- target_mask = tf.cast(target_tokens, tf.float32)
-
- # Create `perm_mask`
- # `target_tokens` cannot see themselves
- self_rev_index = tf.where(target_tokens, rev_index, rev_index + 1)
-
- # 1: cannot attend if i <= j and j is not non-masked (masked_or_func_tokens)
- # 0: can attend if i > j or j is non-masked
- perm_mask = tf.logical_and(
- self_rev_index[:, None] <= rev_index[None, :],
- masked_or_func_tokens)
- perm_mask = tf.cast(perm_mask, tf.float32)
-
- # new target: [next token] for LM and [curr token] (self) for PLM
- new_targets = tf.concat([inputs[0: 1], targets[: -1]],
- axis=0)
-
- # construct inputs_k
- inputs_k = inputs
-
- # construct inputs_q
- inputs_q = target_mask
-
- return perm_mask, new_targets, target_mask, inputs_k, inputs_q
-
-
-def get_dataset(params, num_hosts, num_core_per_host, split, file_names,
- num_batch, seq_len, reuse_len, perm_size, mask_alpha,
- mask_beta, use_bfloat16=False, num_predict=None):
-
- bsz_per_core = params["batch_size"]
- if num_hosts > 1:
- host_id = params["context"].current_host
- else:
- host_id = 0
-
- #### Function used to parse tfrecord
- def parser(record):
- """function used to parse tfrecord."""
-
- record_spec = {
- "input": tf.FixedLenFeature([seq_len], tf.int64),
- "target": tf.FixedLenFeature([seq_len], tf.int64),
- "seg_id": tf.FixedLenFeature([seq_len], tf.int64),
- "label": tf.FixedLenFeature([1], tf.int64),
- "is_masked": tf.FixedLenFeature([seq_len], tf.int64),
- }
-
- # retrieve serialized example
- example = tf.parse_single_example(
- serialized=record,
- features=record_spec)
-
- inputs = example.pop("input")
- target = example.pop("target")
- is_masked = tf.cast(example.pop("is_masked"), tf.bool)
-
- non_reuse_len = seq_len - reuse_len
- assert perm_size <= reuse_len and perm_size <= non_reuse_len
-
- perm_mask_0, target_0, target_mask_0, input_k_0, input_q_0 = _local_perm(
- inputs[:reuse_len],
- target[:reuse_len],
- is_masked[:reuse_len],
- perm_size,
- reuse_len)
-
- perm_mask_1, target_1, target_mask_1, input_k_1, input_q_1 = _local_perm(
- inputs[reuse_len:],
- target[reuse_len:],
- is_masked[reuse_len:],
- perm_size,
- non_reuse_len)
-
- perm_mask_0 = tf.concat([perm_mask_0, tf.ones([reuse_len, non_reuse_len])],
- axis=1)
- perm_mask_1 = tf.concat([tf.zeros([non_reuse_len, reuse_len]), perm_mask_1],
- axis=1)
- perm_mask = tf.concat([perm_mask_0, perm_mask_1], axis=0)
- target = tf.concat([target_0, target_1], axis=0)
- target_mask = tf.concat([target_mask_0, target_mask_1], axis=0)
- input_k = tf.concat([input_k_0, input_k_1], axis=0)
- input_q = tf.concat([input_q_0, input_q_1], axis=0)
-
- if num_predict is not None:
- indices = tf.range(seq_len, dtype=tf.int64)
- bool_target_mask = tf.cast(target_mask, tf.bool)
- indices = tf.boolean_mask(indices, bool_target_mask)
-
- ##### extra padding due to CLS/SEP introduced after prepro
- actual_num_predict = tf.shape(indices)[0]
- pad_len = num_predict - actual_num_predict
-
- ##### target_mapping
- target_mapping = tf.one_hot(indices, seq_len, dtype=tf.float32)
- paddings = tf.zeros([pad_len, seq_len], dtype=target_mapping.dtype)
- target_mapping = tf.concat([target_mapping, paddings], axis=0)
- example["target_mapping"] = tf.reshape(target_mapping,
- [num_predict, seq_len])
-
- ##### target
- target = tf.boolean_mask(target, bool_target_mask)
- paddings = tf.zeros([pad_len], dtype=target.dtype)
- target = tf.concat([target, paddings], axis=0)
- example["target"] = tf.reshape(target, [num_predict])
-
- ##### target mask
- target_mask = tf.concat(
- [tf.ones([actual_num_predict], dtype=tf.float32),
- tf.zeros([pad_len], dtype=tf.float32)],
- axis=0)
- example["target_mask"] = tf.reshape(target_mask, [num_predict])
- else:
- example["target"] = tf.reshape(target, [seq_len])
- example["target_mask"] = tf.reshape(target_mask, [seq_len])
-
- # reshape back to fixed shape
- example["perm_mask"] = tf.reshape(perm_mask, [seq_len, seq_len])
- example["input_k"] = tf.reshape(input_k, [seq_len])
- example["input_q"] = tf.reshape(input_q, [seq_len])
-
- _convert_example(example, use_bfloat16)
-
- for k, v in example.items():
- tf.logging.info("%s: %s", k, v)
-
- return example
-
- # Get dataset
- dataset = parse_files_to_dataset(
- parser=parser,
- file_names=file_names,
- split=split,
- num_batch=num_batch,
- num_hosts=num_hosts,
- host_id=host_id,
- num_core_per_host=num_core_per_host,
- bsz_per_core=bsz_per_core)
-
- return dataset
-
-
-def get_input_fn(
- tfrecord_dir,
- split,
- bsz_per_host,
- seq_len,
- reuse_len,
- bi_data,
- num_hosts=1,
- num_core_per_host=1,
- perm_size=None,
- mask_alpha=None,
- mask_beta=None,
- uncased=False,
- num_passes=None,
- use_bfloat16=False,
- num_predict=None):
-
- # Merge all record infos into a single one
- record_glob_base = format_filename(
- prefix="record_info-{}-*".format(split),
- bsz_per_host=bsz_per_host,
- seq_len=seq_len,
- bi_data=bi_data,
- suffix="json",
- mask_alpha=mask_alpha,
- mask_beta=mask_beta,
- reuse_len=reuse_len,
- uncased=uncased,
- fixed_num_predict=num_predict)
-
- record_info = {"num_batch": 0, "filenames": []}
-
- tfrecord_dirs = tfrecord_dir.split(",")
- tf.logging.info("Use the following tfrecord dirs: %s", tfrecord_dirs)
-
- for idx, record_dir in enumerate(tfrecord_dirs):
- record_glob = os.path.join(record_dir, record_glob_base)
- tf.logging.info("[%d] Record glob: %s", idx, record_glob)
-
- record_paths = sorted(tf.gfile.Glob(record_glob))
- tf.logging.info("[%d] Num of record info path: %d",
- idx, len(record_paths))
-
- cur_record_info = {"num_batch": 0, "filenames": []}
-
- for record_info_path in record_paths:
- if num_passes is not None:
- record_info_name = os.path.basename(record_info_path)
- fields = record_info_name.split(".")[0].split("-")
- pass_id = int(fields[-1])
- if len(fields) == 5 and pass_id >= num_passes:
- tf.logging.info("Skip pass %d: %s", pass_id, record_info_name)
- continue
-
- with tf.gfile.Open(record_info_path, "r") as fp:
- info = json.load(fp)
- if num_passes is not None:
- eff_num_passes = min(num_passes, len(info["filenames"]))
- ratio = eff_num_passes / len(info["filenames"])
- cur_record_info["num_batch"] += int(info["num_batch"] * ratio)
- cur_record_info["filenames"] += info["filenames"][:eff_num_passes]
- else:
- cur_record_info["num_batch"] += info["num_batch"]
- cur_record_info["filenames"] += info["filenames"]
-
- # overwrite directory for `cur_record_info`
- new_filenames = []
- for filename in cur_record_info["filenames"]:
- basename = os.path.basename(filename)
- new_filename = os.path.join(record_dir, basename)
- new_filenames.append(new_filename)
- cur_record_info["filenames"] = new_filenames
-
- tf.logging.info("[Dir %d] Number of chosen batches: %s",
- idx, cur_record_info["num_batch"])
- tf.logging.info("[Dir %d] Number of chosen files: %s",
- idx, len(cur_record_info["filenames"]))
- tf.logging.info(cur_record_info["filenames"])
-
- # add `cur_record_info` to global `record_info`
- record_info["num_batch"] += cur_record_info["num_batch"]
- record_info["filenames"] += cur_record_info["filenames"]
-
- tf.logging.info("Total number of batches: %d",
- record_info["num_batch"])
- tf.logging.info("Total number of files: %d",
- len(record_info["filenames"]))
- tf.logging.info(record_info["filenames"])
-
- def input_fn(params):
- """docs."""
- assert params["batch_size"] * num_core_per_host == bsz_per_host
-
- dataset = get_dataset(
- params=params,
- num_hosts=num_hosts,
- num_core_per_host=num_core_per_host,
- split=split,
- file_names=record_info["filenames"],
- num_batch=record_info["num_batch"],
- seq_len=seq_len,
- reuse_len=reuse_len,
- perm_size=perm_size,
- mask_alpha=mask_alpha,
- mask_beta=mask_beta,
- use_bfloat16=use_bfloat16,
- num_predict=num_predict)
-
- return dataset
-
- return input_fn, record_info
-
-
-if __name__ == "__main__":
- FLAGS = flags.FLAGS
- flags.DEFINE_bool("use_tpu", True, help="whether to use TPUs")
- flags.DEFINE_integer("bsz_per_host", 32, help="batch size per host.")
- flags.DEFINE_integer("num_core_per_host", 8, help="num TPU cores per host.")
-
- flags.DEFINE_integer("seq_len", 512,
- help="Sequence length.")
- flags.DEFINE_integer("reuse_len", 256,
- help="Number of token that can be reused as memory. "
- "Could be half of `seq_len`.")
- flags.DEFINE_bool("uncased", False, help="Use uncased inputs or not.")
- flags.DEFINE_bool("bi_data", True,
- help="whether to create bidirectional data")
- flags.DEFINE_integer("mask_alpha", default=6,
- help="How many tokens to form a group.")
- flags.DEFINE_integer("mask_beta", default=1,
- help="How many tokens to mask within each group.")
- flags.DEFINE_bool("use_eod", True,
- help="whether to append EOD at the end of a doc.")
- flags.DEFINE_bool("from_raw_text", True,
- help="Whether the input is raw text or encoded ids.")
- flags.DEFINE_integer("num_predict", default=85,
- help="Num of tokens to predict.")
-
- flags.DEFINE_string("input_glob", "data/example/*.txt",
- help="Input file glob.")
- flags.DEFINE_string("sp_path", "", help="Path to the sentence piece model.")
- flags.DEFINE_string("save_dir", "proc_data/example",
- help="Directory for saving the processed data.")
- flags.DEFINE_enum("split", "train", ["train", "dev", "test"],
- help="Save the data as which split.")
-
- flags.DEFINE_integer("pass_id", 0, help="ID of the current pass."
- "Different passes sample different negative segment.")
- flags.DEFINE_integer("num_task", 1, help="Number of total tasks.")
- flags.DEFINE_integer("task", 0, help="The Task ID. This value is used when "
- "using multiple workers to identify each worker.")
-
- tf.logging.set_verbosity(tf.logging.INFO)
- app.run(create_data)
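The record-info merging inside `get_input_fn` above reduces to summing `num_batch` and concatenating `filenames`, scaled down when only the first `num_passes` passes are kept. A minimal standalone sketch of that logic, assuming `record_info-*.json` files with `num_batch` and `filenames` fields and no TensorFlow dependency:

import glob
import json
import os


def merge_record_infos(record_dirs, pattern, num_passes=None):
    """Merge record_info JSONs across directories (sketch, assumed file layout)."""
    merged = {"num_batch": 0, "filenames": []}
    for record_dir in record_dirs:
        for path in sorted(glob.glob(os.path.join(record_dir, pattern))):
            with open(path) as fp:
                info = json.load(fp)
            filenames = info["filenames"]
            if num_passes is not None:
                eff = min(num_passes, len(filenames))
                # scale the batch count by the fraction of passes actually used
                merged["num_batch"] += int(info["num_batch"] * eff / len(filenames))
                filenames = filenames[:eff]
            else:
                merged["num_batch"] += info["num_batch"]
            # re-root each tfrecord under the directory its info file came from
            merged["filenames"] += [
                os.path.join(record_dir, os.path.basename(f)) for f in filenames
            ]
    return merged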
diff --git a/spaces/NCTCMumbai/NCTC/models/research/adv_imagenet_models/inception_resnet_v2.py b/spaces/NCTCMumbai/NCTC/models/research/adv_imagenet_models/inception_resnet_v2.py
deleted file mode 100644
index 2f690e8d2f70ecde9a55f40375a7f74cd25651c7..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/adv_imagenet_models/inception_resnet_v2.py
+++ /dev/null
@@ -1,358 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Contains the definition of the Inception Resnet V2 architecture.
-
-As described in http://arxiv.org/abs/1602.07261.
-
- Inception-v4, Inception-ResNet and the Impact of Residual Connections
- on Learning
- Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-
-import tensorflow as tf
-
-slim = tf.contrib.slim
-
-
-def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
- """Builds the 35x35 resnet block."""
- with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
- with tf.variable_scope('Branch_0'):
- tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
- with tf.variable_scope('Branch_1'):
- tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
- tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
- with tf.variable_scope('Branch_2'):
- tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
- tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
- tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
- mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
- up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
- activation_fn=None, scope='Conv2d_1x1')
- net += scale * up
- if activation_fn:
- net = activation_fn(net)
- return net
-
-
-def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
- """Builds the 17x17 resnet block."""
- with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
- with tf.variable_scope('Branch_0'):
- tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
- with tf.variable_scope('Branch_1'):
- tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
- tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
- scope='Conv2d_0b_1x7')
- tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
- scope='Conv2d_0c_7x1')
- mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
- up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
- activation_fn=None, scope='Conv2d_1x1')
- net += scale * up
- if activation_fn:
- net = activation_fn(net)
- return net
-
-
-def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
- """Builds the 8x8 resnet block."""
- with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
- with tf.variable_scope('Branch_0'):
- tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
- with tf.variable_scope('Branch_1'):
- tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
- tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
- scope='Conv2d_0b_1x3')
- tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
- scope='Conv2d_0c_3x1')
- mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
- up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
- activation_fn=None, scope='Conv2d_1x1')
- net += scale * up
- if activation_fn:
- net = activation_fn(net)
- return net
-
-
-def inception_resnet_v2_base(inputs,
- final_endpoint='Conv2d_7b_1x1',
- output_stride=16,
- align_feature_maps=False,
- scope=None):
- """Inception model from http://arxiv.org/abs/1602.07261.
-
- Constructs an Inception Resnet v2 network from inputs to the given final
- endpoint. This method can construct the network up to the final inception
- block Conv2d_7b_1x1.
-
- Args:
- inputs: a tensor of size [batch_size, height, width, channels].
- final_endpoint: specifies the endpoint to construct the network up to. It
- can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
- 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
- 'Mixed_5b', 'Mixed_6a', 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
- output_stride: A scalar that specifies the requested ratio of input to
- output spatial resolution. Only supports 8 and 16.
- align_feature_maps: When true, changes all the VALID paddings in the network
- to SAME padding so that the feature maps are aligned.
- scope: Optional variable_scope.
-
- Returns:
- tensor_out: output tensor corresponding to the final_endpoint.
- end_points: a set of activations for external use, for example summaries or
- losses.
-
- Raises:
- ValueError: if final_endpoint is not set to one of the predefined values,
- or if the output_stride is not 8 or 16, or if the output_stride is 8 and
- we request an end point after 'PreAuxLogits'.
- """
- if output_stride != 8 and output_stride != 16:
- raise ValueError('output_stride must be 8 or 16.')
-
- padding = 'SAME' if align_feature_maps else 'VALID'
-
- end_points = {}
-
- def add_and_check_final(name, net):
- end_points[name] = net
- return name == final_endpoint
-
- with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
- with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
- stride=1, padding='SAME'):
- # 149 x 149 x 32
- net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding,
- scope='Conv2d_1a_3x3')
- if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
-
- # 147 x 147 x 32
- net = slim.conv2d(net, 32, 3, padding=padding,
- scope='Conv2d_2a_3x3')
- if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
- # 147 x 147 x 64
- net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
- if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
- # 73 x 73 x 64
- net = slim.max_pool2d(net, 3, stride=2, padding=padding,
- scope='MaxPool_3a_3x3')
- if add_and_check_final('MaxPool_3a_3x3', net): return net, end_points
- # 73 x 73 x 80
- net = slim.conv2d(net, 80, 1, padding=padding,
- scope='Conv2d_3b_1x1')
- if add_and_check_final('Conv2d_3b_1x1', net): return net, end_points
- # 71 x 71 x 192
- net = slim.conv2d(net, 192, 3, padding=padding,
- scope='Conv2d_4a_3x3')
- if add_and_check_final('Conv2d_4a_3x3', net): return net, end_points
- # 35 x 35 x 192
- net = slim.max_pool2d(net, 3, stride=2, padding=padding,
- scope='MaxPool_5a_3x3')
- if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points
-
- # 35 x 35 x 320
- with tf.variable_scope('Mixed_5b'):
- with tf.variable_scope('Branch_0'):
- tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
- with tf.variable_scope('Branch_1'):
- tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
- tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
- scope='Conv2d_0b_5x5')
- with tf.variable_scope('Branch_2'):
- tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
- tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
- scope='Conv2d_0b_3x3')
- tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
- scope='Conv2d_0c_3x3')
- with tf.variable_scope('Branch_3'):
- tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
- scope='AvgPool_0a_3x3')
- tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
- scope='Conv2d_0b_1x1')
- net = tf.concat(
- [tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3)
-
- if add_and_check_final('Mixed_5b', net): return net, end_points
- # TODO(alemi): Register intermediate endpoints
- net = slim.repeat(net, 10, block35, scale=0.17)
-
- # 33 x 33 x 1088 if output_stride == 8,
- # 17 x 17 x 1088 if output_stride == 16
- use_atrous = output_stride == 8
-
- with tf.variable_scope('Mixed_6a'):
- with tf.variable_scope('Branch_0'):
- tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2,
- padding=padding,
- scope='Conv2d_1a_3x3')
- with tf.variable_scope('Branch_1'):
- tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
- tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
- scope='Conv2d_0b_3x3')
- tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
- stride=1 if use_atrous else 2,
- padding=padding,
- scope='Conv2d_1a_3x3')
- with tf.variable_scope('Branch_2'):
- tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2,
- padding=padding,
- scope='MaxPool_1a_3x3')
- net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
-
- if add_and_check_final('Mixed_6a', net): return net, end_points
-
- # TODO(alemi): register intermediate endpoints
- with slim.arg_scope([slim.conv2d], rate=2 if use_atrous else 1):
- net = slim.repeat(net, 20, block17, scale=0.10)
- if add_and_check_final('PreAuxLogits', net): return net, end_points
-
- if output_stride == 8:
- # TODO(gpapan): Properly support output_stride for the rest of the net.
- raise ValueError('output_stride==8 is only supported up to the '
- 'PreAuxlogits end_point for now.')
-
- # 8 x 8 x 2080
- with tf.variable_scope('Mixed_7a'):
- with tf.variable_scope('Branch_0'):
- tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
- tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
- padding=padding,
- scope='Conv2d_1a_3x3')
- with tf.variable_scope('Branch_1'):
- tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
- tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
- padding=padding,
- scope='Conv2d_1a_3x3')
- with tf.variable_scope('Branch_2'):
- tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
- tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
- scope='Conv2d_0b_3x3')
- tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
- padding=padding,
- scope='Conv2d_1a_3x3')
- with tf.variable_scope('Branch_3'):
- tower_pool = slim.max_pool2d(net, 3, stride=2,
- padding=padding,
- scope='MaxPool_1a_3x3')
- net = tf.concat(
- [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
-
- if add_and_check_final('Mixed_7a', net): return net, end_points
-
- # TODO(alemi): register intermediate endpoints
- net = slim.repeat(net, 9, block8, scale=0.20)
- net = block8(net, activation_fn=None)
-
- # 8 x 8 x 1536
- net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
- if add_and_check_final('Conv2d_7b_1x1', net): return net, end_points
-
- raise ValueError('final_endpoint (%s) not recognized' % final_endpoint)
-
-
-def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
- dropout_keep_prob=0.8,
- reuse=None,
- scope='InceptionResnetV2',
- create_aux_logits=True):
- """Creates the Inception Resnet V2 model.
-
- Args:
- inputs: a 4-D tensor of size [batch_size, height, width, 3].
- num_classes: number of predicted classes.
- is_training: whether the model is being trained or not.
- dropout_keep_prob: float, the fraction to keep before final layer.
- reuse: whether or not the network and its variables should be reused. To be
- able to reuse 'scope' must be given.
- scope: Optional variable_scope.
- create_aux_logits: Whether to include the auxiliary logits.
-
- Returns:
- logits: the logits outputs of the model.
- end_points: the set of end_points from the inception model.
- """
- end_points = {}
-
- with tf.variable_scope(scope, 'InceptionResnetV2', [inputs, num_classes],
- reuse=reuse) as scope:
- with slim.arg_scope([slim.batch_norm, slim.dropout],
- is_training=is_training):
-
- net, end_points = inception_resnet_v2_base(inputs, scope=scope)
-
- if create_aux_logits:
- with tf.variable_scope('AuxLogits'):
- aux = end_points['PreAuxLogits']
- aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID',
- scope='Conv2d_1a_3x3')
- aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')
- aux = slim.conv2d(aux, 768, aux.get_shape()[1:3],
- padding='VALID', scope='Conv2d_2a_5x5')
- aux = slim.flatten(aux)
- aux = slim.fully_connected(aux, num_classes, activation_fn=None,
- scope='Logits')
- end_points['AuxLogits'] = aux
-
- with tf.variable_scope('Logits'):
- net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
- scope='AvgPool_1a_8x8')
- net = slim.flatten(net)
-
- net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
- scope='Dropout')
-
- end_points['PreLogitsFlatten'] = net
- logits = slim.fully_connected(net, num_classes, activation_fn=None,
- scope='Logits')
- end_points['Logits'] = logits
- end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
-
- return logits, end_points
-inception_resnet_v2.default_image_size = 299
-
-
-def inception_resnet_v2_arg_scope(weight_decay=0.00004,
- batch_norm_decay=0.9997,
- batch_norm_epsilon=0.001):
- """Returns the scope with the default parameters for inception_resnet_v2.
-
- Args:
- weight_decay: the weight decay for weights variables.
- batch_norm_decay: decay for the moving average of batch_norm momentums.
- batch_norm_epsilon: small float added to variance to avoid dividing by zero.
-
- Returns:
- an arg_scope with the parameters needed for inception_resnet_v2.
- """
- # Set weight_decay for weights in conv2d and fully_connected layers.
- with slim.arg_scope([slim.conv2d, slim.fully_connected],
- weights_regularizer=slim.l2_regularizer(weight_decay),
- biases_regularizer=slim.l2_regularizer(weight_decay)):
-
- batch_norm_params = {
- 'decay': batch_norm_decay,
- 'epsilon': batch_norm_epsilon,
- }
- # Set activation_fn and parameters for batch_norm.
- with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
- normalizer_fn=slim.batch_norm,
- normalizer_params=batch_norm_params) as scope:
- return scope
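For context, the functions above compose in the usual TF1/slim pattern: wrap the model call in its arg_scope and read predictions from `end_points`. A hedged usage sketch (TF 1.x with `tf.contrib.slim` only; the import name `irv2` is assumed, not part of the diff):

import tensorflow as tf  # TF 1.x, where tf.contrib.slim is available

import inception_resnet_v2 as irv2  # assumed local module name for the file above

images = tf.placeholder(tf.float32, [None, 299, 299, 3])  # default_image_size = 299
with tf.contrib.slim.arg_scope(irv2.inception_resnet_v2_arg_scope()):
    logits, end_points = irv2.inception_resnet_v2(
        images, num_classes=1001, is_training=False)
probabilities = end_points['Predictions']  # softmax over the 1001 classes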
diff --git a/spaces/NCTCMumbai/NCTC/models/research/attention_ocr/python/utils.py b/spaces/NCTCMumbai/NCTC/models/research/attention_ocr/python/utils.py
deleted file mode 100644
index 10d93ad21e1444736bf4562ef0df1c939617a5c1..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/attention_ocr/python/utils.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Functions to support building models for StreetView text transcription."""
-
-import tensorflow as tf
-from tensorflow.contrib import slim
-
-
-def logits_to_log_prob(logits):
- """Computes log probabilities using numerically stable trick.
-
- This uses two numerical stability tricks:
- 1) softmax(x) = softmax(x - c) where c is a constant applied to all
- arguments. If we set c = max(x) then the softmax is more numerically
- stable.
- 2) log softmax(x) is not numerically stable, but we can stabilize it
- by using the identity log softmax(x) = x - log sum exp(x)
-
- Args:
- logits: Tensor of arbitrary shape whose last dimension contains logits.
-
- Returns:
- A tensor of the same shape as the input, but with corresponding log
- probabilities.
- """
-
- with tf.variable_scope('log_probabilities'):
- reduction_indices = len(logits.shape.as_list()) - 1
- max_logits = tf.reduce_max(
- logits, reduction_indices=reduction_indices, keep_dims=True)
- safe_logits = tf.subtract(logits, max_logits)
- sum_exp = tf.reduce_sum(
- tf.exp(safe_logits),
- reduction_indices=reduction_indices,
- keep_dims=True)
- log_probs = tf.subtract(safe_logits, tf.log(sum_exp))
- return log_probs
-
-
-def variables_to_restore(scope=None, strip_scope=False):
- """Returns a list of variables to restore for the specified list of methods.
-
- It is supposed that variable name starts with the method's scope (a prefix
- returned by _method_scope function).
-
- Args:
- methods_names: a list of names of configurable methods.
- strip_scope: if True will return variable names without method's scope.
- If methods_names is None will return names unchanged.
- model_scope: a scope for a whole model.
-
- Returns:
- a dictionary mapping variable names to variables for restore.
- """
- if scope:
- variable_map = {}
- method_variables = slim.get_variables_to_restore(include=[scope])
- for var in method_variables:
- if strip_scope:
- var_name = var.op.name[len(scope) + 1:]
- else:
- var_name = var.op.name
- variable_map[var_name] = var
-
- return variable_map
- else:
- return {v.op.name: v for v in slim.get_variables_to_restore()}
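The stability argument in `logits_to_log_prob` can be checked numerically without TensorFlow; a small sketch in plain NumPy (values chosen so a naive exp would overflow):

import numpy as np

logits = np.array([1000.0, 1001.0, 1002.0])   # naive np.exp(logits) overflows
shifted = logits - logits.max()                # softmax(x) == softmax(x - c)
log_probs = shifted - np.log(np.exp(shifted).sum())
print(log_probs)                               # approx. [-2.408, -1.408, -0.408]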
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/scripts/mean_pool.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/scripts/mean_pool.py
deleted file mode 100644
index 4eea048ef3455cb3c897e74c18778c78fdc9fcbf..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/scripts/mean_pool.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env python3 -u
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import os
-import os.path as osp
-import math
-import numpy as np
-import tqdm
-import torch
-import torch.nn.functional as F
-from shutil import copyfile
-
-from npy_append_array import NpyAppendArray
-
-
-def get_parser():
- parser = argparse.ArgumentParser(
- description="mean pools representations by compressing uniform splits of the data"
- )
- # fmt: off
- parser.add_argument('source', help='directory with features')
- parser.add_argument('--split', help='which split to read', required=True)
- parser.add_argument('--save-dir', help='where to save the output', required=True)
- parser.add_argument('--subsample-rate', type=float, default=0.5, help='size to subsample data to')
-
- parser.add_argument('--remove-extra', action='store_true', help="if true, removes extra states that can't be pooled, otherwise pads with 0s")
- # fmt: on
-
- return parser
-
-
-def main():
- parser = get_parser()
- args = parser.parse_args()
-
- source_path = osp.join(args.source, args.split)
-
- print(f"data path: {source_path}")
-
- features = np.load(source_path + ".npy", mmap_mode="r")
-
- os.makedirs(args.save_dir, exist_ok=True)
- save_path = osp.join(args.save_dir, args.split)
-
- copyfile(source_path + ".tsv", save_path + ".tsv")
-
- if os.path.exists(source_path + ".phn"):
- copyfile(source_path + ".phn", save_path + ".phn")
- if os.path.exists(source_path + ".wrd"):
- copyfile(source_path + ".wrd", save_path + ".wrd")
-
- if os.path.exists(osp.join(args.source, "dict.phn.txt")):
- copyfile(
- osp.join(args.source, "dict.phn.txt"),
- osp.join(args.save_dir, "dict.phn.txt"),
- )
-
- if osp.exists(save_path + ".npy"):
- os.remove(save_path + ".npy")
- npaa = NpyAppendArray(save_path + ".npy")
-
- with open(source_path + ".lengths", "r") as lf:
- lengths = lf.readlines()
-
- fsz = features.shape[-1]
- start = 0
- with torch.no_grad():
- with open(save_path + ".lengths", "w") as lengths_out:
- for length in tqdm.tqdm(lengths):
- length = int(length)
- end = start + length
- feats = features[start:end]
- start += length
- x = torch.from_numpy(feats).cuda()
- target_num = math.ceil(length * args.subsample_rate)
- rem = length % target_num
-
- if rem > 0:
- if args.remove_extra:
- to_rem = target_num - rem
- target_num -= 1
- x = x[:-to_rem]
- else:
- to_add = target_num - rem
- x = F.pad(x, [0, 0, 0, to_add])
- x[-to_add:] = x[-to_add - 1]
-
- x = x.view(target_num, -1, fsz)
- x = x.mean(dim=-2)
- print(target_num, file=lengths_out)
- npaa.append(x.cpu().numpy())
-
-
-if __name__ == "__main__":
- main()
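The core of the script is the pad-or-trim step that makes each utterance's length divisible by `target_num` before reshaping and mean-pooling. A standalone sketch of that step, mirroring the loop above (it assumes subsample rates such as the default 0.5, where the remainder handling divides evenly):

import math

import torch
import torch.nn.functional as F


def mean_pool(feats: torch.Tensor, subsample_rate: float, remove_extra: bool = False):
    """Mean-pool a (length, dim) feature matrix down to ~length * subsample_rate rows."""
    length, fsz = feats.shape
    target_num = math.ceil(length * subsample_rate)
    rem = length % target_num
    if rem > 0:
        if remove_extra:
            # drop the trailing states that cannot be pooled evenly
            feats = feats[:-(target_num - rem)]
            target_num -= 1
        else:
            # pad by repeating the last frame so the length divides evenly
            to_add = target_num - rem
            feats = F.pad(feats, [0, 0, 0, to_add])
            feats[-to_add:] = feats[-to_add - 1]
    return feats.view(target_num, -1, fsz).mean(dim=-2)


print(mean_pool(torch.randn(9, 4), 0.5).shape)  # torch.Size([5, 4])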
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/audio/multi_modality_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/audio/multi_modality_dataset.py
deleted file mode 100644
index 69d23d31c1eb66803fa5062b5991a7c34ab07dc7..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/audio/multi_modality_dataset.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright (c) 2021-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-
-import logging
-import math
-from typing import List, Optional, NamedTuple
-
-import numpy as np
-import torch
-from fairseq.data import (
- ConcatDataset,
- LanguagePairDataset,
- FileAudioDataset,
- data_utils,
-)
-from fairseq.data import FairseqDataset
-
-logger = logging.getLogger(__name__)
-
-
-class ModalityDatasetItem(NamedTuple):
- datasetname: str
- dataset: any
- max_positions: List[int]
- max_tokens: Optional[int] = None
- max_sentences: Optional[int] = None
-
-# MultiModalityDataset: concatenates multiple datasets with different modalities.
-# Compared with ConcatDataset it can 1) sample data according to per-dataset ratios and
-# 2) add a "mode" field indicating which type of dataset each sample comes from.
-# It is used together with GroupedEpochBatchIterator to generate mini-batches whose
-# samples all come from the same type of dataset.
-# If only one dataset is used, it behaves like the original dataset with the mode added.
-class MultiModalityDataset(ConcatDataset):
- def __init__(self, datasets: List[ModalityDatasetItem]):
- id_to_mode = []
- dsets = []
- max_tokens = []
- max_sentences = []
- max_positions = []
- for dset in datasets:
- id_to_mode.append(dset.datasetname)
- dsets.append(dset.dataset)
- max_tokens.append(dset.max_tokens)
- max_positions.append(dset.max_positions)
- max_sentences.append(dset.max_sentences)
- weights = [1.0 for s in dsets]
- super().__init__(dsets, weights)
- self.max_tokens = max_tokens
- self.max_positions = max_positions
- self.max_sentences = max_sentences
- self.id_to_mode = id_to_mode
- self.raw_sub_batch_samplers = []
- self._cur_epoch = 0
-
- def set_epoch(self, epoch):
- super().set_epoch(epoch)
- self._cur_epoch = epoch
-
- def __getitem__(self, idx):
- dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
- sample = self.datasets[dataset_idx][sample_idx]
- return (dataset_idx, sample)
-
- def collater(self, samples):
- if len(samples) == 0:
- return {}
- dataset_idx = samples[0][0]
- # make sure all samples in samples are from same dataset
- assert sum([0 if dataset_idx == s[0] else 1 for s in samples]) == 0
- samples = self.datasets[dataset_idx].collater([x[1] for x in samples])
- # add mode
- samples["net_input"]["mode"] = self.id_to_mode[dataset_idx]
-
- return samples
-
- def size(self, index: int):
- if len(self.datasets) == 1:
- return self.datasets[0].size(index)
- return super().size(index)
-
- @property
- def sizes(self):
- if len(self.datasets) == 1:
- return self.datasets[0].sizes
- return super().sizes
-
- def ordered_indices(self):
- """
- Returns indices sorted by length so that less padding is needed.
- """
- if len(self.datasets) == 1:
- return self.datasets[0].ordered_indices()
- indices_group = []
- for d_idx, ds in enumerate(self.datasets):
- sample_num = self.cumulative_sizes[d_idx]
- if d_idx > 0:
- sample_num = sample_num - self.cumulative_sizes[d_idx - 1]
- assert sample_num == len(ds)
- indices_group.append(ds.ordered_indices())
- return indices_group
-
- def get_raw_batch_samplers(self, required_batch_size_multiple, seed):
- if len(self.raw_sub_batch_samplers) > 0:
- logger.info(" raw_sub_batch_samplers exists. No action is taken")
- return
- with data_utils.numpy_seed(seed):
- indices = self.ordered_indices()
- for i, ds in enumerate(self.datasets):
- indices[i] = ds.filter_indices_by_size(
- indices[i],
- self.max_positions[i],
- )[0]
- sub_batch_sampler = ds.batch_by_size(
- indices[i],
- max_tokens=self.max_tokens[i],
- max_sentences=self.max_sentences[i],
- required_batch_size_multiple=required_batch_size_multiple,
- )
- self.raw_sub_batch_samplers.append(sub_batch_sampler)
-
- def get_batch_samplers(self, mult_ratios, required_batch_size_multiple, seed):
- self.get_raw_batch_samplers(required_batch_size_multiple, seed)
- batch_samplers = []
- for i, _ in enumerate(self.datasets):
- if i > 0:
- sub_batch_sampler = [
- [y + self.cumulative_sizes[i - 1] for y in x]
- for x in self.raw_sub_batch_samplers[i]
- ]
- else:
- sub_batch_sampler = list(self.raw_sub_batch_samplers[i])
- smp_r = mult_ratios[i]
- if smp_r != 1:
- is_increase = "increased" if smp_r > 1 else "decreased"
- logger.info(
- "number of batch for the dataset {} is {} from {} to {}".format(
- self.id_to_mode[i],
- is_increase,
- len(sub_batch_sampler),
- int(len(sub_batch_sampler) * smp_r),
- )
- )
- mul_samplers = []
- for _ in range(math.floor(smp_r)):
- mul_samplers = mul_samplers + sub_batch_sampler
- if math.floor(smp_r) != smp_r:
- with data_utils.numpy_seed(seed + self._cur_epoch):
- np.random.shuffle(sub_batch_sampler)
- smp_num = int(
- (smp_r - math.floor(smp_r)) * len(sub_batch_sampler)
- )
- mul_samplers = mul_samplers + sub_batch_sampler[:smp_num]
- sub_batch_sampler = mul_samplers
- else:
- logger.info(
- "dataset {} batch number is {} ".format(
- self.id_to_mode[i], len(sub_batch_sampler)
- )
- )
- batch_samplers.append(sub_batch_sampler)
-
- return batch_samplers
-
-
-class LangPairMaskDataset(FairseqDataset):
- def __init__(
- self,
- dataset: LanguagePairDataset,
- src_eos: int,
- src_bos: Optional[int] = None,
- noise_id: Optional[int] = -1,
- mask_ratio: Optional[float] = 0,
- mask_type: Optional[str] = "random",
- ):
- self.dataset = dataset
- self.src_eos = src_eos
- self.src_bos = src_bos
- self.noise_id = noise_id
- self.mask_ratio = mask_ratio
- self.mask_type = mask_type
- assert mask_type in ("random", "tail")
-
- @property
- def src_sizes(self):
- return self.dataset.src_sizes
-
- @property
- def tgt_sizes(self):
- return self.dataset.tgt_sizes
-
- @property
- def sizes(self):
- # dataset.sizes can be a dynamically computed sizes:
- return self.dataset.sizes
-
- def get_batch_shapes(self):
- return self.dataset.buckets
-
- def num_tokens_vec(self, indices):
- return self.dataset.num_tokens_vec(indices)
-
- def __len__(self):
- return len(self.dataset)
-
- def num_tokens(self, index):
- return self.dataset.num_tokens(index)
-
- def size(self, index):
- return self.dataset.size(index)
-
- def ordered_indices(self):
- return self.dataset.ordered_indices()
-
- @property
- def supports_prefetch(self):
- return getattr(self.dataset, "supports_prefetch", False)
-
- def prefetch(self, indices):
- return self.dataset.prefetch(indices)
-
- def mask_src_tokens(self, sample):
- src_item = sample["source"]
- mask = None
- if self.mask_type == "random":
- mask = torch.rand(len(src_item)).le(self.mask_ratio)
- else:
- mask = torch.ones(len(src_item))
- mask[: int(len(src_item) * (1 - self.mask_ratio))] = 0
- mask = mask.eq(1)
- if src_item[0] == self.src_bos:
- mask[0] = False
- if src_item[-1] == self.src_eos:
- mask[-1] = False
- mask_src_item = src_item.masked_fill(mask, self.noise_id)
- smp = {"id": sample["id"], "source": mask_src_item, "target": sample["target"]}
- return smp
-
- def __getitem__(self, index):
- sample = self.dataset[index]
- if self.mask_ratio > 0:
- sample = self.mask_src_tokens(sample)
- return sample
-
- def collater(self, samples, pad_to_length=None):
- return self.dataset.collater(samples, pad_to_length)
-
-
-class FileAudioDatasetWrapper(FileAudioDataset):
- def collater(self, samples):
- samples = super().collater(samples)
- if len(samples) == 0:
- return {}
- samples["net_input"]["src_tokens"] = samples["net_input"]["source"]
- samples["net_input"]["prev_output_tokens"] = None
- del samples["net_input"]["source"]
- samples["net_input"]["src_lengths"] = None
- samples["net_input"]["alignment"] = None
- return samples
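The ratio handling in `get_batch_samplers` above repeats the whole sub-sampler floor(r) times and then draws a random fraction of it for the leftover part. A tiny self-contained sketch of that rule (illustrative names, no fairseq dependency):

import math
import random


def upsample_batches(batches, ratio, seed=0):
    """Repeat a list of batches by a possibly fractional ratio, as in get_batch_samplers."""
    out = []
    for _ in range(math.floor(ratio)):
        out += batches
    frac = ratio - math.floor(ratio)
    if frac > 0:
        shuffled = list(batches)
        random.Random(seed).shuffle(shuffled)
        out += shuffled[: int(frac * len(batches))]
    return out


print(len(upsample_batches([[0, 1], [2, 3], [4, 5], [6, 7]], 1.5)))  # 6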
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp
deleted file mode 100644
index 744c363e550231b8e0fbb94f998d46039daf5c00..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-#include <torch/extension.h>
-#include <vector>
-
-std::vector<at::Tensor>
-dynamicconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l);
-
-std::vector<at::Tensor> dynamicconv_cuda_backward(
- at::Tensor gradOutput,
- int padding_l,
- at::Tensor input,
- at::Tensor filters);
-
-#define CHECK_CUDA(x) \
- AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) \
- AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
-#define CHECK_INPUT(x) \
- CHECK_CUDA(x); \
- CHECK_CONTIGUOUS(x)
-
-std::vector<at::Tensor>
-dynamicconv_forward(at::Tensor input, at::Tensor filters, int padding_l) {
- CHECK_INPUT(input);
- CHECK_INPUT(filters);
-
- return dynamicconv_cuda_forward(input, filters, padding_l);
-}
-
-std::vector<at::Tensor> dynamicconv_backward(
- at::Tensor gradOutput,
- int padding_l,
- at::Tensor input,
- at::Tensor filters) {
- CHECK_INPUT(gradOutput);
- CHECK_INPUT(input);
- CHECK_INPUT(filters);
-
- return dynamicconv_cuda_backward(gradOutput, padding_l, input, filters);
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)");
- m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)");
-}
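On the Python side, a pybind11 binding like the one above is usually JIT-compiled with torch.utils.cpp_extension; a hedged sketch (the companion dynamicconv_cuda_kernel.cu file name is assumed, and a CUDA toolchain is required):

from torch.utils.cpp_extension import load

# Compiles the C++/CUDA sources and imports the resulting module on the fly.
dynamicconv_cuda = load(
    name="dynamicconv_cuda",
    sources=["dynamicconv_cuda.cpp", "dynamicconv_cuda_kernel.cu"],
)

# Exposed entry points (both return a list of tensors, per the bindings above):
#   dynamicconv_cuda.forward(input, filters, padding_l)
#   dynamicconv_cuda.backward(grad_output, padding_l, input, filters)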
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/m2m_100/tokenizers/seg_ko.sh b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/m2m_100/tokenizers/seg_ko.sh
deleted file mode 100644
index c523d92634d9b61b97bbcdbfd17dfc33465bfc09..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/m2m_100/tokenizers/seg_ko.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-SCRIPT=`realpath $0`
-MECAB=`dirname $SCRIPT`/thirdparty/mecab-0.996-ko-0.9.2
-
-export PATH=$PATH:"$MECAB/bin":"$MECAB/lib"
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$MECAB/lib"
-
-cat - | mecab -O wakati
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/legacy/masked_lm_dataset.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/legacy/masked_lm_dataset.py
deleted file mode 100644
index dd8ea2c60aff306ab3a756223a298a28d41a4991..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/legacy/masked_lm_dataset.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from typing import Dict, List, Tuple
-
-import numpy as np
-import torch
-from fairseq.data import Dictionary, FairseqDataset, data_utils
-from fairseq.data.concat_dataset import ConcatDataset
-from fairseq.data.legacy.block_pair_dataset import BlockPairDataset
-from fairseq.data.token_block_dataset import TokenBlockDataset
-
-
-class MaskedLMDataset(FairseqDataset):
- """
- A wrapper Dataset for masked language modelling. The dataset
- wraps around TokenBlockDataset or BlockPairDataset and creates a batch
- where the input blocks are masked according to the specified masking
- probability. Additionally the batch can also contain sentence level targets
- if this is specified.
-
- Args:
- dataset: Dataset which generates blocks of data. Only BlockPairDataset
- and TokenBlockDataset are supported.
- sizes: Sentence lengths
- vocab: Dictionary with the vocabulary and special tokens.
- pad_idx: Id of padding token in dictionary
- mask_idx: Id of mask token in dictionary
- classif_token_idx: Id of classification token in dictionary. This is the
- token associated with the sentence embedding (Eg: CLS for BERT)
- sep_token_idx: Id of separator token in dictionary
- (Eg: SEP in BERT)
- seed: Seed for random number generator for reproducibility.
- shuffle: Shuffle the elements before batching.
- has_pairs: Specifies whether the underlying dataset
- generates a pair of blocks along with a sentence_target or not.
- Setting it to True assumes that the underlying dataset generates a
- label for the pair of sentences which is surfaced as
- sentence_target. The default value assumes a single block with no
- sentence target.
- segment_id: An optional segment id for filling in the segment labels
- when we are in the single block setting (Eg: XLM). Default is 0.
- masking_ratio: specifies what percentage of the blocks should be masked.
- masking_prob: specifies the probability of a given token being
- replaced with the "MASK" token.
- random_token_prob: specifies the probability of a given token being
- replaced by a random token from the vocabulary.
- """
-
- def __init__(
- self,
- dataset: FairseqDataset,
- sizes: np.ndarray,
- vocab: Dictionary,
- pad_idx: int,
- mask_idx: int,
- classif_token_idx: int,
- sep_token_idx: int,
- seed: int = 1,
- shuffle: bool = True,
- has_pairs: bool = True,
- segment_id: int = 0,
- masking_ratio: float = 0.15,
- masking_prob: float = 0.8,
- random_token_prob: float = 0.1,
- ):
- # Make sure the input datasets are the ones supported
- assert (
- isinstance(dataset, TokenBlockDataset)
- or isinstance(dataset, BlockPairDataset)
- or isinstance(dataset, ConcatDataset)
- ), (
- "MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or "
- "ConcatDataset"
- )
-
- self.dataset = dataset
- self.sizes = np.array(sizes)
- self.vocab = vocab
- self.pad_idx = pad_idx
- self.mask_idx = mask_idx
- self.classif_token_idx = classif_token_idx
- self.sep_token_idx = sep_token_idx
- self.shuffle = shuffle
- self.seed = seed
- self.has_pairs = has_pairs
- self.segment_id = segment_id
- self.masking_ratio = masking_ratio
- self.masking_prob = masking_prob
- self.random_token_prob = random_token_prob
-
- # If we have only one block then sizes needs to be updated to include
- # the classification token
- if not has_pairs:
- self.sizes = self.sizes + 1
-
- def __getitem__(self, index: int):
- # if has_pairs, then expect 2 blocks and a sentence target
- if self.has_pairs:
- (block_one, block_two, sentence_target) = self.dataset[index]
- else:
- block_one = self.dataset[index]
-
- return {
- "id": index,
- "block_one": block_one,
- "block_two": block_two if self.has_pairs else None,
- "sentence_target": sentence_target if self.has_pairs else None,
- }
-
- def __len__(self):
- return len(self.dataset)
-
- def _mask_block(
- self,
- sentence: np.ndarray,
- mask_idx: int,
- pad_idx: int,
- dictionary_token_range: Tuple,
- ):
- """
- Mask tokens for Masked Language Model training
- Samples mask_ratio tokens that will be predicted by LM.
-
- Note: This function may not be efficient since it performs multiple
- conversions between np and torch; these could be replaced with torch
- operators later.
-
- Args:
- sentence: 1d tensor to be masked
- mask_idx: index to use for masking the sentence
- pad_idx: index to use for masking the target for tokens we aren't
- predicting
- dictionary_token_range: range of indices in dictionary which can
- be used for random word replacement
- (e.g. without special characters)
- Return:
- masked_sent: masked sentence
- target: target with words which we are not predicting replaced
- by pad_idx
- """
- masked_sent = np.copy(sentence)
- sent_length = len(sentence)
- mask_num = math.ceil(sent_length * self.masking_ratio)
- mask = np.random.choice(sent_length, mask_num, replace=False)
- target = np.copy(sentence)
-
- for i in range(sent_length):
- if i in mask:
- rand = np.random.random()
-
- # replace with mask if probability is less than masking_prob
- # (Eg: 0.8)
- if rand < self.masking_prob:
- masked_sent[i] = mask_idx
-
- # replace with random token if probability is less than
- # masking_prob + random_token_prob (Eg: 0.9)
- elif rand < (self.masking_prob + self.random_token_prob):
- # sample random token from dictionary
- masked_sent[i] = np.random.randint(
- dictionary_token_range[0], dictionary_token_range[1]
- )
- else:
- target[i] = pad_idx
-
- return masked_sent, target
-
- def _collate(self, samples: List[Dict], pad_idx: int, eos_idx: int):
- """
- Does the heavy lifting for creating a batch from the input list of
- examples. The logic is as follows:
- 1. Mask the input blocks. In case has_pair is True then we have 2
- blocks to mask.
- 2. Prepend the first masked block tensor with the special token
- used as sentence embedding. Eg: CLS in BERT. This happens
- irrespective of the value of has_pair.
- 3. If has_pair is True, then append the first masked block with the
- special separator token (eg: SEP for BERT) and compute segment
- label accordingly. In this case, also append the second masked
- block with this special separator token and compute its segment
- label.
- 4. For the targets tensor, prepend and append with padding index
- accordingly.
- 5. Concatenate all tensors.
- """
- if len(samples) == 0:
- return {}
- # To ensure determinism, we reset the state of the PRNG after every
- # batch based on the seed and the first id of the batch. This ensures
- # that across epochs we get the same mask for the same example. This
- # is needed for reproducibility and is how BERT does masking
- # TODO: Can we add determinism without this constraint?
- with data_utils.numpy_seed(self.seed + samples[0]["id"]):
- for s in samples:
-
- # token range is needed for replacing with random token during
- # masking
- token_range = (self.vocab.nspecial, len(self.vocab))
-
- # mask according to specified probabilities.
- masked_blk_one, masked_tgt_one = self._mask_block(
- s["block_one"],
- self.mask_idx,
- self.pad_idx,
- token_range,
- )
-
- tokens = np.concatenate([[self.classif_token_idx], masked_blk_one])
- targets = np.concatenate([[self.pad_idx], masked_tgt_one])
- segments = np.ones(len(tokens)) * self.segment_id
-
- # if has_pairs is True then we need to add the SEP token to both
- # the blocks after masking and re-compute segments based on the new
- # lengths.
- if self.has_pairs:
- tokens_one = np.concatenate([tokens, [self.sep_token_idx]])
- targets_one = np.concatenate([targets, [self.pad_idx]])
-
- masked_blk_two, masked_tgt_two = self._mask_block(
- s["block_two"], self.mask_idx, self.pad_idx, token_range
- )
- tokens_two = np.concatenate([masked_blk_two, [self.sep_token_idx]])
- targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]])
-
- # block + 1 sep + 1 special (CLS)
- segments_one = np.zeros(len(tokens_one))
- # block + 1 sep
- segments_two = np.ones(len(tokens_two))
-
- tokens = np.concatenate([tokens_one, tokens_two])
- targets = np.concatenate([targets_one, targets_two])
- segments = np.concatenate([segments_one, segments_two])
-
- s["source"] = torch.LongTensor(tokens)
- s["segment_labels"] = torch.LongTensor(segments)
- s["lm_target"] = torch.LongTensor(targets)
-
- def merge(key):
- return data_utils.collate_tokens(
- [s[key] for s in samples], pad_idx, eos_idx, left_pad=False
- )
-
- return {
- "id": torch.LongTensor([s["id"] for s in samples]),
- "ntokens": sum(len(s["source"]) for s in samples),
- "net_input": {
- "src_tokens": merge("source"),
- "segment_labels": merge("segment_labels"),
- },
- "lm_target": merge("lm_target"),
- "sentence_target": torch.LongTensor([s["sentence_target"] for s in samples])
- if self.has_pairs
- else None,
- "nsentences": len(samples),
- }
-
- def collater(self, samples: List[Dict]):
- """Merge a list of samples to form a mini-batch.
-
- Args:
- samples (List[dict]): samples to collate
-
- Returns:
- dict: a mini-batch of data
- """
- return self._collate(samples, self.vocab.pad(), self.vocab.eos())
-
- def num_tokens(self, index: int):
- """
- Return the number of tokens in a sample. This value is used to
- enforce max-tokens during batching.
- """
- return self.sizes[index]
-
- def size(self, index: int):
- """
- Return an example's size as a float or tuple. This value is used when
- filtering a dataset with max-positions.
- """
- return self.sizes[index]
-
- def ordered_indices(self):
- """
- Return an ordered list of indices. Batches will be constructed based
- on this order.
- """
- if self.shuffle:
- return np.random.permutation(len(self))
- else:
- order = [np.arange(len(self))]
- order.append(self.sizes)
- return np.lexsort(order)
-
- @property
- def supports_prefetch(self):
- return getattr(self.dataset, "supports_prefetch", False)
-
- def prefetch(self, indices):
- self.dataset.prefetch(indices)
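The masking scheme in `_mask_block` can be reproduced outside the dataset class; a self-contained NumPy sketch with the same 80/10/10 split (token ids and ranges below are made up for illustration):

import math

import numpy as np


def mask_block(sentence, mask_idx, pad_idx, token_range,
               masking_ratio=0.15, masking_prob=0.8, random_token_prob=0.1):
    masked = np.copy(sentence)
    target = np.copy(sentence)
    num_mask = math.ceil(len(sentence) * masking_ratio)
    positions = set(np.random.choice(len(sentence), num_mask, replace=False))
    for i in range(len(sentence)):
        if i in positions:
            r = np.random.random()
            if r < masking_prob:                        # 80%: replace with [MASK]
                masked[i] = mask_idx
            elif r < masking_prob + random_token_prob:  # 10%: random vocab token
                masked[i] = np.random.randint(token_range[0], token_range[1])
            # remaining 10%: keep the original token
        else:
            target[i] = pad_idx                         # position is not predicted
    return masked, target


src = np.arange(5, 25)  # 20 "tokens"
masked, target = mask_block(src, mask_idx=4, pad_idx=1, token_range=(5, 100))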
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/modules/multihead_attention.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/modules/multihead_attention.py
deleted file mode 100644
index 8eb9d09dad37ab132295166d691873beec63eaf1..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/modules/multihead_attention.py
+++ /dev/null
@@ -1,349 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Dict, Optional, Tuple
-
-import torch
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.incremental_decoding_utils import with_incremental_state
-from fairseq.modules.fairseq_dropout import FairseqDropout
-from torch import Tensor, nn
-
-
-try:
- from fairseq.model_parallel.megatron.mpu import (
- get_cuda_rng_tracker,
- get_model_parallel_world_size,
- ColumnParallelLinear,
- RowParallelLinear,
- )
-
- has_megatron_submodule = True
-except (ImportError, ModuleNotFoundError):
- has_megatron_submodule = False
-
-
-@with_incremental_state
-class ModelParallelMultiheadAttention(nn.Module):
- """Model parallel Multi-headed attention.
- This performs the Multi-headed attention over multiple gpus.
-
- See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
- """
-
- def __init__(
- self,
- embed_dim,
- num_heads,
- kdim=None,
- vdim=None,
- dropout=0.0,
- bias=True,
- self_attention=False,
- encoder_decoder_attention=False,
- ):
- super().__init__()
- if not has_megatron_submodule:
- raise ImportError(
- "\n\nPlease install the megatron submodule:"
- "\n\n git submodule update --init "
- "fairseq/model_parallel/megatron"
- )
- self.embed_dim = embed_dim
- self.kdim = kdim if kdim is not None else embed_dim
- self.vdim = vdim if vdim is not None else embed_dim
- self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
-
- self.model_parallel_size = get_model_parallel_world_size()
-
- self.num_heads_partition = num_heads // self.model_parallel_size
- assert (
- self.num_heads_partition * self.model_parallel_size == num_heads
- ), "Number of heads must be divisible by model parallel size"
-
- self.dropout_module = FairseqDropout(
- dropout, module_name=self.__class__.__name__
- )
- self.head_dim = embed_dim // num_heads
- assert (
- self.head_dim * num_heads == self.embed_dim
- ), "embed_dim must be divisible by num_heads"
- self.scaling = self.head_dim ** -0.5
-
- self.self_attention = self_attention
- self.encoder_decoder_attention = encoder_decoder_attention
-
- assert (
- not self.self_attention or self.qkv_same_dim
- ), "Self-attention requires query, key and value to be of the same size"
-
- self.k_proj = ColumnParallelLinear(
- self.kdim, embed_dim, bias=bias, gather_output=False
- )
- self.v_proj = ColumnParallelLinear(
- self.vdim, embed_dim, bias=bias, gather_output=False
- )
- self.q_proj = ColumnParallelLinear(
- embed_dim, embed_dim, bias=bias, gather_output=False
- )
- self.out_proj = RowParallelLinear(
- embed_dim, embed_dim, bias=bias, input_is_parallel=True
- )
-
- def forward(
- self,
- query,
- key: Optional[Tensor],
- value: Optional[Tensor],
- key_padding_mask: Optional[Tensor] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- static_kv: bool = False,
- attn_mask: Optional[Tensor] = None,
- **unused_kwargs,
- ) -> Tuple[Tensor, Optional[Tensor]]:
- """Input shape: Time x Batch x Channel
-
- Args:
- key_padding_mask (ByteTensor, optional): mask to exclude
- keys that are pads, of shape `(batch, src_len)`, where
- padding elements are indicated by 1s.
- attn_mask (ByteTensor, optional): typically used to
- implement causal attention, where the mask prevents the
- attention from looking forward in time (default: None).
- """
- tgt_len, bsz, embed_dim = query.size()
- assert embed_dim == self.embed_dim
- assert list(query.size()) == [tgt_len, bsz, embed_dim]
-
- is_tpu = query.device.type == "xla"
-
- if incremental_state is not None:
- saved_state = self._get_input_buffer(incremental_state)
- if saved_state is not None and "prev_key" in saved_state:
- # previous time steps are cached - no need to recompute
- # key and value if they are static
- if static_kv:
- assert self.encoder_decoder_attention and not self.self_attention
- key = value = None
- else:
- saved_state = None
-
- if self.self_attention:
- q = self.q_proj(query)
- k = self.k_proj(query)
- v = self.v_proj(query)
- elif self.encoder_decoder_attention:
- # encoder-decoder attention
- q = self.q_proj(query)
- if key is None:
- assert value is None
- k = v = None
- else:
- k = self.k_proj(key)
- v = self.v_proj(key)
-
- else:
- assert key is not None and value is not None
- q = self.q_proj(query)
- k = self.k_proj(key)
- v = self.v_proj(value)
- q *= self.scaling
-
- q = (
- q.contiguous()
- .view(tgt_len, bsz * self.num_heads_partition, self.head_dim)
- .transpose(0, 1)
- )
- if k is not None:
- k = (
- k.contiguous()
- .view(-1, bsz * self.num_heads_partition, self.head_dim)
- .transpose(0, 1)
- )
- if v is not None:
- v = (
- v.contiguous()
- .view(-1, bsz * self.num_heads_partition, self.head_dim)
- .transpose(0, 1)
- )
-
- if saved_state is not None:
- # saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim)
- if "prev_key" in saved_state:
- _prev_key = saved_state["prev_key"]
- assert _prev_key is not None
- prev_key = _prev_key.view(
- bsz * self.num_heads_partition, -1, self.head_dim
- )
- if static_kv:
- k = prev_key
- else:
- assert k is not None
- k = torch.cat([prev_key, k], dim=1)
- if "prev_value" in saved_state:
- _prev_value = saved_state["prev_value"]
- assert _prev_value is not None
- prev_value = _prev_value.view(
- bsz * self.num_heads_partition, -1, self.head_dim
- )
- if static_kv:
- v = prev_value
- else:
- assert v is not None
- v = torch.cat([prev_value, v], dim=1)
- prev_key_padding_mask: Optional[Tensor] = None
- if "prev_key_padding_mask" in saved_state:
- prev_key_padding_mask = saved_state["prev_key_padding_mask"]
- assert k is not None and v is not None
- key_padding_mask = (
- ModelParallelMultiheadAttention._append_prev_key_padding_mask(
- key_padding_mask=key_padding_mask,
- prev_key_padding_mask=prev_key_padding_mask,
- batch_size=bsz,
- src_len=k.size(1),
- static_kv=static_kv,
- )
- )
-
- saved_state["prev_key"] = k.view(
- bsz, self.num_heads_partition, -1, self.head_dim
- )
- saved_state["prev_value"] = v.view(
- bsz, self.num_heads_partition, -1, self.head_dim
- )
- saved_state["prev_key_padding_mask"] = key_padding_mask
- # In this branch incremental_state is never None
- assert incremental_state is not None
- incremental_state = self._set_input_buffer(incremental_state, saved_state)
- assert k is not None
- src_len = k.size(1)
-
- # This is part of a workaround to get around fork/join parallelism
- # not supporting Optional types.
- if key_padding_mask is not None and key_padding_mask.dim() == 0:
- key_padding_mask = None
-
- if key_padding_mask is not None:
- assert key_padding_mask.size(0) == bsz
- assert key_padding_mask.size(1) == src_len
-
- attn_weights = torch.bmm(q, k.transpose(1, 2))
-
- assert list(attn_weights.size()) == [
- bsz * self.num_heads_partition,
- tgt_len,
- src_len,
- ]
-
- if attn_mask is not None:
- attn_mask = attn_mask.unsqueeze(0)
- attn_weights += attn_mask
-
- if key_padding_mask is not None:
- # don't attend to padding symbols
- attn_weights = attn_weights.view(
- bsz, self.num_heads_partition, tgt_len, src_len
- )
- if not is_tpu:
- attn_weights = attn_weights.masked_fill(
- key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
- float("-inf"),
- )
- else:
- attn_weights = attn_weights.transpose(0, 2)
- attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
- attn_weights = attn_weights.transpose(0, 2)
- attn_weights = attn_weights.view(
- bsz * self.num_heads_partition, tgt_len, src_len
- )
-
- attn_weights_float = utils.softmax(attn_weights, dim=-1)
- attn_weights = attn_weights_float.type_as(attn_weights)
-
- with get_cuda_rng_tracker().fork():
- attn_probs = self.dropout_module(attn_weights)
-
- assert v is not None
- attn = torch.bmm(attn_probs, v)
- assert list(attn.size()) == [
- bsz * self.num_heads_partition,
- tgt_len,
- self.head_dim,
- ]
- embed_dim_partition = embed_dim // self.model_parallel_size
- attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition)
- attn = self.out_proj(attn)
- # Return attn_weights as None to keep the return type the same as the
- # single-GPU multihead attention. This will be deprecated.
- attn_weights: Optional[Tensor] = None
-
- return attn, attn_weights
-
- @staticmethod
- def _append_prev_key_padding_mask(
- key_padding_mask: Optional[Tensor],
- prev_key_padding_mask: Optional[Tensor],
- batch_size: int,
- src_len: int,
- static_kv: bool,
- ) -> Optional[Tensor]:
- # saved key padding masks have shape (bsz, seq_len)
- if prev_key_padding_mask is not None and static_kv:
- new_key_padding_mask = prev_key_padding_mask
- elif prev_key_padding_mask is not None and key_padding_mask is not None:
- new_key_padding_mask = torch.cat(
- [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
- )
- # During incremental decoding, as the padding token enters and
- # leaves the frame, there will be a time when prev or current
- # is None
- elif prev_key_padding_mask is not None:
-
- filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1))
- if prev_key_padding_mask.is_cuda:
- filler = filler.cuda()
- new_key_padding_mask = torch.cat(
- [prev_key_padding_mask.float(), filler.float()], dim=1
- )
- elif key_padding_mask is not None:
- filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1))
- if key_padding_mask.is_cuda:
- filler = filler.cuda()
- new_key_padding_mask = torch.cat(
- [filler.float(), key_padding_mask.float()], dim=1
- )
- else:
- new_key_padding_mask = prev_key_padding_mask
- return new_key_padding_mask
-
- def reorder_incremental_state(
- self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order
- ):
- """Reorder buffered internal state (for incremental generation)."""
- input_buffer = self._get_input_buffer(incremental_state)
- if input_buffer is not None:
- for k in input_buffer.keys():
- if input_buffer[k] is not None:
- input_buffer[k] = input_buffer[k].index_select(0, new_order)
- incremental_state = self._set_input_buffer(incremental_state, input_buffer)
- return incremental_state
-
- def _get_input_buffer(
- self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
- ) -> Dict[str, Optional[Tensor]]:
- result = self.get_incremental_state(incremental_state, "attn_state")
- if result is not None:
- return result
- else:
- empty_result: Dict[str, Optional[Tensor]] = {}
- return empty_result
-
- def _set_input_buffer(
- self,
- incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
- buffer: Dict[str, Optional[Tensor]],
- ):
- return self.set_incremental_state(incremental_state, "attn_state", buffer)
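The constructor above enforces two divisibility constraints: heads split evenly across model-parallel ranks, and channels split evenly across heads. A quick arithmetic check (example sizes are illustrative):

embed_dim, num_heads, model_parallel_size = 1024, 16, 4

num_heads_partition = num_heads // model_parallel_size
assert num_heads_partition * model_parallel_size == num_heads   # heads per rank
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim                        # channels per head

# Each rank projects to embed_dim // model_parallel_size channels in total.
print(num_heads_partition, head_dim, embed_dim // model_parallel_size)  # 4 128 256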
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/laser/laser_src/laser_transformer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/laser/laser_src/laser_transformer.py
deleted file mode 100644
index 0be030994ff87334ca0392302374693f7f2c61b3..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/laser/laser_src/laser_transformer.py
+++ /dev/null
@@ -1,354 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-
-from typing import Any, Dict, List, Optional
-from torch import Tensor
-
-import torch
-import torch.nn as nn
-
-from fairseq.models import (
- FairseqEncoderDecoderModel,
- register_model,
- register_model_architecture,
-)
-from fairseq.models.transformer import (
- base_architecture,
- Embedding,
- TransformerModel,
- TransformerEncoder,
- TransformerDecoder,
-)
-from fairseq.modules import (
- TransformerDecoderLayer,
-)
-
-logger = logging.getLogger(__name__)
-
-
-@register_model("laser_transformer")
-class LaserTransformerModel(FairseqEncoderDecoderModel):
- """Train Transformer for LASER task
-
- Requires --task laser
- """
-
- def __init__(self, encoder, decoder):
- super().__init__(encoder, decoder)
-
- def forward(
- self,
- src_tokens,
- src_lengths,
- prev_output_tokens=None,
- tgt_tokens=None,
- tgt_lengths=None,
- target_language_id=-1,
- dataset_name="",
- ):
- laser_encoder_out = self.encoder(src_tokens, src_lengths)
- return self.decoder(
- prev_output_tokens, laser_encoder_out, lang_id=target_language_id
- )
-
- @staticmethod
- def add_args(parser):
- """Add model-specific arguments to the parser."""
- TransformerModel.add_args(parser)
- parser.add_argument(
- "--decoder-lang-embed-dim",
- type=int,
- metavar="N",
- help="decoder language embedding dimension",
- )
-
- @classmethod
- def build_model(cls, args, task):
- base_laser_transformer_architecture(args)
-
- num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0
-
- def load_embed_tokens(dictionary, embed_dim):
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
-
- return Embedding(num_embeddings, embed_dim, padding_idx)
-
- encoder_embed_tokens = load_embed_tokens(
- task.source_dictionary, args.encoder_embed_dim
- )
- decoder_embed_tokens = load_embed_tokens(
- task.target_dictionary, args.decoder_embed_dim
- )
- num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0
-
- encoder = LaserTransformerEncoder(
- args, task.source_dictionary, encoder_embed_tokens
- )
-
- decoder = LaserTransformerDecoder(
- args,
- task.target_dictionary,
- decoder_embed_tokens,
- num_langs=num_langs,
- lang_embed_dim=args.decoder_lang_embed_dim,
- )
-
- return cls(encoder, decoder)
-
-
-class LaserTransformerEncoder(TransformerEncoder):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
-
- def forward(self, src_tokens, *args, **kwargs):
- encoder_out = super().forward(src_tokens, *args, **kwargs)
-
- x = encoder_out["encoder_out"][0] # T x B x C
- padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1)
-
- if padding_mask.any():
- x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x)
-
- # Build the sentence embedding by max-pooling over the encoder outputs
- sentemb = x.max(dim=0)[0]
-
- # The PyTorch Mobile lite interpreter does not support returning a NamedTuple from
- # `forward`, so we use a dictionary instead.
- # TorchScript does not support mixed values so the values are all lists.
- # The empty list is equivalent to None.
- return {"sentemb": [sentemb]} # B x C
-
- @torch.jit.export
- def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
- """
- Same as the one in transformer.py, with new_sentemb
- """
- if len(encoder_out["sentemb"]) == 0:
- new_sentemb = []
- else:
- new_sentemb = [encoder_out["sentemb"][0].index_select(0, new_order)]
-
- return {
- "sentemb": new_sentemb, # B x C
- }
-
-
-class LaserTransformerDecoder(TransformerDecoder):
- def __init__(self, args, dictionary, *kargs, **kwargs):
- self.num_langs = kwargs.get("num_langs", 1)
- self.lang_embed_dim = kwargs.get("lang_embed_dim", 0)
- kwargs.pop("num_langs", None)
- kwargs.pop("lang_embed_dim", None)
-
- super().__init__(args, dictionary, *kargs, **kwargs, no_encoder_attn=True)
-
- if self.lang_embed_dim == 0:
- self.embed_lang = None
- else:
- self.embed_lang = nn.Embedding(self.num_langs, self.lang_embed_dim)
- nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1)
-
- if self.output_projection is not None:
- laser_output_embed_dim = (
- self.output_embed_dim + self.lang_embed_dim + args.encoder_embed_dim
- )
- self.output_projection = nn.Linear(
- laser_output_embed_dim, len(dictionary), bias=False
- )
- nn.init.normal_(
- self.output_projection.weight,
- mean=0,
- std=laser_output_embed_dim ** -0.5,
- )
-
- def build_decoder_layer(self, args, no_encoder_attn=False):
- decoder_embed_dim = args.decoder_embed_dim
- args.decoder_embed_dim = (
- decoder_embed_dim + self.lang_embed_dim + args.encoder_embed_dim
- )
- res = TransformerDecoderLayer(args, no_encoder_attn=True)
- args.decoder_embed_dim = decoder_embed_dim
-
- return res
-
- def extract_features(
- self,
- prev_output_tokens,
- encoder_out: Optional[Dict[str, List[Tensor]]],
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- full_context_alignment: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- lang_id: Optional[int] = None,
- ):
- """
- Similar to *forward* but only returns features.
-
- Includes several features from "Jointly Learning to Align and
- Translate with Transformer Models" (Garg et al., EMNLP 2019).
-
- Args:
- full_context_alignment (bool, optional): don't apply
- auto-regressive mask to self-attention (default: False).
- alignment_layer (int, optional): return mean alignment over
- heads at this layer (default: last layer).
- alignment_heads (int, optional): only average alignment over
- this many heads (default: all heads).
-
- Returns:
- tuple:
- - the decoder's features of shape `(batch, tgt_len, embed_dim)`
- - a dictionary with any model-specific outputs
- """
- if alignment_layer is None:
- alignment_layer = self.num_layers - 1
-
- # embed positions
- positions = (
- self.embed_positions(
- prev_output_tokens, incremental_state=incremental_state
- )
- if self.embed_positions is not None
- else None
- )
-
- if incremental_state is not None:
- prev_output_tokens = prev_output_tokens[:, -1:]
- if positions is not None:
- positions = positions[:, -1:]
-
- bsz, seqlen = prev_output_tokens.size()
-
- # embed tokens and positions
- x = self.embed_scale * self.embed_tokens(prev_output_tokens)
-
- if self.quant_noise is not None:
- x = self.quant_noise(x)
-
- if self.project_in_dim is not None:
- x = self.project_in_dim(x)
-
- if positions is not None:
- x += positions
-
- if self.layernorm_embedding is not None:
- x = self.layernorm_embedding(x)
-
- x = self.dropout_module(x)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- if self.embed_lang is not None:
- lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id)
- langemb = self.embed_lang(lang_ids)
- langemb = langemb.unsqueeze(0)
- repeat_vals = [x.shape[0] // langemb.shape[0]] + [-1] * (
- len(langemb.shape) - 1
- )
- x = torch.cat((x, langemb.expand(*repeat_vals)), dim=-1)
-
- sentemb = encoder_out["sentemb"][0]
- sentemb = sentemb.unsqueeze(0)
-
- repeat_vals = [x.shape[0] // sentemb.shape[0]] + [-1] * (len(sentemb.shape) - 1)
- x = torch.cat((x, sentemb.expand(*repeat_vals)), dim=-1)
-
- self_attn_padding_mask: Optional[Tensor] = None
- if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
- self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
-
- # decoder layers
- attn: Optional[Tensor] = None
- inner_states: List[Optional[Tensor]] = [x]
- for idx, layer in enumerate(self.layers):
- if incremental_state is None and not full_context_alignment:
- self_attn_mask = self.buffered_future_mask(x)
- else:
- self_attn_mask = None
-
- x, layer_attn, _ = layer(
- x,
- None,
- None,
- incremental_state,
- self_attn_mask=self_attn_mask,
- self_attn_padding_mask=self_attn_padding_mask,
- need_attn=bool((idx == alignment_layer)),
- need_head_weights=bool((idx == alignment_layer)),
- )
- inner_states.append(x)
- if layer_attn is not None and idx == alignment_layer:
- attn = layer_attn.float().to(x)
-
- if attn is not None:
- if alignment_heads is not None:
- attn = attn[:alignment_heads]
-
- # average probabilities over heads
- attn = attn.mean(dim=0)
-
- if self.layer_norm is not None:
- x = self.layer_norm(x)
-
- # T x B x C -> B x T x C
- x = x.transpose(0, 1)
-
- if self.project_out_dim is not None:
- x = self.project_out_dim(x)
-
- return x, {"attn": [attn], "inner_states": inner_states}
-
- def forward(
- self,
- prev_output_tokens,
- encoder_out: Optional[Dict[str, List[Tensor]]] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- features_only: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- src_lengths: Optional[Any] = None,
- return_all_hiddens: bool = False,
- lang_id: Optional[int] = None,
- ):
- """
- Args:
- prev_output_tokens (LongTensor): previous decoder outputs of shape
- `(batch, tgt_len)`, for teacher forcing
- encoder_out (optional): output from the encoder, used for
- encoder-side attention
- incremental_state (dict): dictionary used for storing state during
- :ref:`Incremental decoding`
- features_only (bool, optional): only return features without
- applying output layer (default: False).
-
- Returns:
- tuple:
- - the decoder's output of shape `(batch, tgt_len, vocab)`
- - a dictionary with any model-specific outputs
- """
-
- assert lang_id is not None
-
- x, extra = self.extract_features(
- prev_output_tokens,
- encoder_out=encoder_out,
- incremental_state=incremental_state,
- alignment_layer=alignment_layer,
- alignment_heads=alignment_heads,
- lang_id=lang_id,
- )
- if not features_only:
- x = self.output_layer(x)
- return x, extra
-
-
-@register_model_architecture("laser_transformer", "laser_transformer")
-def base_laser_transformer_architecture(args):
- base_architecture(args)
- args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0)
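
The LASER encoder deleted above turns a variable-length source sentence into a single vector by masking padded positions with -inf and max-pooling the encoder states over the time dimension. A minimal sketch of that pooling on toy tensors (the shapes and the padding index are illustrative assumptions):

import torch

T, B, C, pad_idx = 5, 2, 4, 1
src_tokens = torch.tensor([[7, 9, 3, 2, 5],
                           [4, 6, 8, 1, 1]])  # B x T, second sentence ends in padding
x = torch.randn(T, B, C)                      # encoder outputs, T x B x C

# Mask padded positions so they can never win the max, then pool over time.
padding_mask = src_tokens.eq(pad_idx).t().unsqueeze(-1)             # T x B x 1
x = x.float().masked_fill(padding_mask, float("-inf")).type_as(x)
sentemb = x.max(dim=0)[0]                                           # B x C
print(sentemb.shape)  # torch.Size([2, 4])
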
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/prepend_token_dataset.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/prepend_token_dataset.py
deleted file mode 100644
index fd1331f4c44c1595eb9bb78baa0cf5cf3bcce9ad..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/prepend_token_dataset.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-
-from . import BaseWrapperDataset
-
-
-class PrependTokenDataset(BaseWrapperDataset):
- def __init__(self, dataset, token=None):
- super().__init__(dataset)
- self.token = token
- if token is not None:
- self._sizes = np.array(dataset.sizes) + 1
- else:
- self._sizes = dataset.sizes
-
- def __getitem__(self, idx):
- item = self.dataset[idx]
- if self.token is not None:
- item = torch.cat([item.new([self.token]), item])
- return item
-
- @property
- def sizes(self):
- return self._sizes
-
- def num_tokens(self, index):
- n = self.dataset.num_tokens(index)
- if self.token is not None:
- n += 1
- return n
-
- def size(self, index):
- n = self.dataset.size(index)
- if self.token is not None:
- n += 1
- return n
diff --git a/spaces/ORI-Muchim/MinamiTTS/README.md b/spaces/ORI-Muchim/MinamiTTS/README.md
deleted file mode 100644
index ad0a0c5e295b1abee0518474f19f8ccb6e13f1c5..0000000000000000000000000000000000000000
--- a/spaces/ORI-Muchim/MinamiTTS/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: MinamiTTS
-emoji: 🏃
-colorFrom: blue
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/ORI-Muchim/NahidaTTS/mel_processing.py b/spaces/ORI-Muchim/NahidaTTS/mel_processing.py
deleted file mode 100644
index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000
--- a/spaces/ORI-Muchim/NahidaTTS/mel_processing.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
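
The mel_processing module deleted above follows the usual VITS recipe: reflect-pad the waveform, take an STFT magnitude, multiply by a cached librosa mel filterbank, then log-compress with a clamp. A condensed sketch of that pipeline on random audio (the hyperparameter values and the use of a complex STFT plus .abs() are illustrative assumptions, not the module's exact settings):

import torch
from librosa.filters import mel as librosa_mel_fn

sr, n_fft, hop, win, n_mels = 22050, 1024, 256, 1024, 80
y = torch.randn(1, sr)                                  # 1 second of fake audio, B x samples

window = torch.hann_window(win)
pad = (n_fft - hop) // 2
y = torch.nn.functional.pad(y.unsqueeze(1), (pad, pad), mode='reflect').squeeze(1)

# Linear magnitude spectrogram: B x (n_fft//2 + 1) x frames
spec = torch.stft(y, n_fft, hop_length=hop, win_length=win, window=window,
                  center=False, pad_mode='reflect', normalized=False,
                  onesided=True, return_complex=True).abs()

# Project onto mel bands and apply dynamic-range compression (log with clamp).
mel_basis = torch.from_numpy(librosa_mel_fn(sr=sr, n_fft=n_fft, n_mels=n_mels)).float()
mel = torch.log(torch.clamp(mel_basis @ spec, min=1e-5))            # B x n_mels x frames
print(mel.shape)
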
diff --git a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py b/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py
deleted file mode 100644
index 42f2597235ed11cec681e21112dcdebd0c9a1149..0000000000000000000000000000000000000000
--- a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# ------------------------------------------------------------------------------------------------
-# Deformable DETR
-# Copyright (c) 2020 SenseTime. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------------------------------
-# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
-# ------------------------------------------------------------------------------------------------
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
-
-from __future__ import absolute_import
-from __future__ import print_function
-from __future__ import division
-
-import torch
-import torch.nn.functional as F
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-
-if torch.cuda.is_available():
- try:
- import MultiScaleDeformableAttention as MSDA
- except ModuleNotFoundError as e:
- info_string = (
- "\n\nPlease compile MultiScaleDeformableAttention CUDA op with the following commands:\n"
- "\t`cd mask2former/modeling/pixel_decoder/ops`\n"
- "\t`sh make.sh`\n"
- )
- raise ModuleNotFoundError(info_string) from e
-else:
- MSDA = None
-
-
-class MSDeformAttnFunction(Function):
- @staticmethod
- def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
- ctx.im2col_step = im2col_step
- output = MSDA.ms_deform_attn_forward(
- value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
- ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
- return output
-
- @staticmethod
- @once_differentiable
- def backward(ctx, grad_output):
- value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
- grad_value, grad_sampling_loc, grad_attn_weight = \
- MSDA.ms_deform_attn_backward(
- value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
-
- return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
-
-
-def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
- # Pure-PyTorch reference implementation, for debugging and testing only;
- # use the compiled CUDA op for real workloads.
- N_, S_, M_, D_ = value.shape
- _, Lq_, M_, L_, P_, _ = sampling_locations.shape
- value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
- sampling_grids = 2 * sampling_locations - 1
- sampling_value_list = []
- for lid_, (H_, W_) in enumerate(value_spatial_shapes):
- # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
- value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_)
- # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
- sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
- # N_*M_, D_, Lq_, P_
- sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_,
- mode='bilinear', padding_mode='zeros', align_corners=False)
- sampling_value_list.append(sampling_value_l_)
- # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
- attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
- output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
- return output.transpose(1, 2).contiguous()
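
The PyTorch reference deleted above hinges on F.grid_sample: sampling locations in [0, 1] are rescaled to grid_sample's [-1, 1] range, values are bilinearly sampled per head and feature level, and the samples are then combined by the attention weights. A minimal sketch of just that sampling step (all shapes here are toy assumptions):

import torch
import torch.nn.functional as F

N, C, H, W = 1, 2, 4, 4
value = torch.arange(N * C * H * W, dtype=torch.float32).view(N, C, H, W)

# Two query points with one sampling location each, given in [0, 1] image coords.
loc = torch.tensor([[[[0.25, 0.25]],
                     [[0.75, 0.75]]]])        # N x Lq x P x 2
grid = 2 * loc - 1                            # rescale to grid_sample's [-1, 1] range

sampled = F.grid_sample(value, grid, mode='bilinear',
                        padding_mode='zeros', align_corners=False)   # N x C x Lq x P
print(sampled.squeeze(-1))
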
diff --git a/spaces/Paperboxiv/Dunhuang_GPT/README.md b/spaces/Paperboxiv/Dunhuang_GPT/README.md
deleted file mode 100644
index 02492473bdc43eebb7a2eae6960e2f5080d5e0fe..0000000000000000000000000000000000000000
--- a/spaces/Paperboxiv/Dunhuang_GPT/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Dunhuang GPT
-emoji: 🦀
-colorFrom: gray
-colorTo: gray
-sdk: gradio
-sdk_version: 3.34.0
-app_file: app.py
-pinned: false
-license: other
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/langdefs.py b/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/langdefs.py
deleted file mode 100644
index fcce7bd8e4ee071dccccb32fa9bc925de313604e..0000000000000000000000000000000000000000
--- a/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/langdefs.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# langdefs.py
-# -*- coding: utf-8 -*-
-#
-# This file is part of LilyPond, the GNU music typesetter.
-#
-# Copyright (C) 2006--2022 John Mandereau
-#
-# LilyPond is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# LilyPond is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with LilyPond.  If not, see <https://www.gnu.org/licenses/>.
-
-
-"""
-Documentation i18n module
-"""
-
-import gettext
-import os
-import re
-import sys
-
-
-def lang_file_name(p, langext, ext):
- if langext != '':
- return p + '.' + langext + ext
- return p + ext
-
-
-class LanguageDef:
- def __init__(self, code, name, webext=None, html_filter=lambda s: s, enable_ly_identifier_l10n=True):
- self.code = code
- self.name = name
- self.enabled = True
- if webext is None:
- self.webext = self.code
- else:
- self.webext = webext
- self.html_filter = html_filter
- self.enable_ly_identifier_l10n = enable_ly_identifier_l10n
-
- def file_name(self, prefix, ext):
- return lang_file_name(prefix, self.webext, ext)
-
-
-# All language information needed for documentation i18n is defined
-# here. For each 'Documentation/ab' directory containing docs
-# translated in 'ab', there should be one entry in LANGUAGES.
-
-site = LanguageDef('en', 'English', webext='')
-
-html_body_re = re.compile('<body.*?>', re.I)
-html_end_body_re = re.compile('