diff --git a/spaces/0x90e/ESRGAN-MANGA/ESRGANer.py b/spaces/0x90e/ESRGAN-MANGA/ESRGANer.py deleted file mode 100644 index d372355e4aeaead258e8e8b5986f00e5a962c827..0000000000000000000000000000000000000000 --- a/spaces/0x90e/ESRGAN-MANGA/ESRGANer.py +++ /dev/null @@ -1,156 +0,0 @@ -from PIL import Image, ImageChops -import numpy as np -import cv2 as cv -import math -import torch -from torch.nn import functional as F - -""" -Borrowed and adapted from https://github.com/xinntao/Real-ESRGAN/blob/master/realesrgan/utils.py -Thank you xinntao! -""" -class ESRGANer(): - """A helper class for upsampling images with ESRGAN. - - Args: - scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4. - model (nn.Module): The defined network. Default: None. - tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop - input images into tiles, and then process each of them. Finally, they will be merged into one image. - 0 denotes for do not use tile. Default: 500. - tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10. - pre_pad (int): Pad the input images to avoid border artifacts. Default: 10. - """ - - def __init__(self, - scale=4, - model=None, - tile=300, - tile_pad=10, - pre_pad=10 - ): - self.scale = scale - self.tile_size = tile - self.tile_pad = tile_pad - self.pre_pad = pre_pad - self.mod_scale = None - - self.model = model - - def pre_process(self, img): - """Pre-process, such as pre-pad and mod pad, so that the images can be divisible - """ - self.img = img - - # pre_pad - if self.pre_pad != 0: - self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect') - # mod pad for divisible borders - if self.scale == 2: - self.mod_scale = 2 - elif self.scale == 1: - self.mod_scale = 4 - if self.mod_scale is not None: - self.mod_pad_h, self.mod_pad_w = 0, 0 - _, _, h, w = self.img.size() - if (h % self.mod_scale != 0): - self.mod_pad_h = (self.mod_scale - h % self.mod_scale) - if (w % self.mod_scale != 0): - self.mod_pad_w = (self.mod_scale - w % self.mod_scale) - self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect') - - def process(self): - # model inference - self.output = self.model(self.img) - - def tile_process(self): - """It will first crop input images to tiles, and then process each tile. - Finally, all the processed tiles are merged into one images. 
- - Modified from: https://github.com/ata4/esrgan-launcher - """ - batch, channel, height, width = self.img.shape - output_height = height * self.scale - output_width = width * self.scale - output_shape = (batch, channel, output_height, output_width) - - # start with black image - self.output = self.img.new_zeros(output_shape) - tiles_x = math.ceil(width / self.tile_size) - tiles_y = math.ceil(height / self.tile_size) - - print("Image processing started...") - - # loop over all tiles - for y in range(tiles_y): - for x in range(tiles_x): - # extract tile from input image - ofs_x = x * self.tile_size - ofs_y = y * self.tile_size - # input tile area on total image - input_start_x = ofs_x - input_end_x = min(ofs_x + self.tile_size, width) - input_start_y = ofs_y - input_end_y = min(ofs_y + self.tile_size, height) - - # input tile area on total image with padding - input_start_x_pad = max(input_start_x - self.tile_pad, 0) - input_end_x_pad = min(input_end_x + self.tile_pad, width) - input_start_y_pad = max(input_start_y - self.tile_pad, 0) - input_end_y_pad = min(input_end_y + self.tile_pad, height) - - # input tile dimensions - input_tile_width = input_end_x - input_start_x - input_tile_height = input_end_y - input_start_y - tile_idx = y * tiles_x + x + 1 - input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad] - - # upscale tile - try: - with torch.no_grad(): - output_tile = self.model(input_tile) - except RuntimeError as error: - print('Error', error) - print(f'Processing tile {tile_idx}/{tiles_x * tiles_y}') - - # output tile area on total image - output_start_x = input_start_x * self.scale - output_end_x = input_end_x * self.scale - output_start_y = input_start_y * self.scale - output_end_y = input_end_y * self.scale - - # output tile area without padding - output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale - output_end_x_tile = output_start_x_tile + input_tile_width * self.scale - output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale - output_end_y_tile = output_start_y_tile + input_tile_height * self.scale - - # put tile into output image - self.output[:, :, output_start_y:output_end_y, - output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile, - output_start_x_tile:output_end_x_tile] - - print('All tiles processed, saving output image!') - - def post_process(self): - # remove extra pad - if self.mod_scale is not None: - _, _, h, w = self.output.size() - self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale] - # remove prepad - if self.pre_pad != 0: - _, _, h, w = self.output.size() - self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale] - return self.output - - @torch.no_grad() - def enhance(self, img): - self.pre_process(img) - - if self.tile_size > 0: - self.tile_process() - else: - self.process() - output_img = self.post_process() - - return output_img \ No newline at end of file diff --git a/spaces/123aa/pastel-mix/app.py b/spaces/123aa/pastel-mix/app.py deleted file mode 100644 index c77d61dc487061c475ccbc66baa7cd8695069b14..0000000000000000000000000000000000000000 --- a/spaces/123aa/pastel-mix/app.py +++ /dev/null @@ -1,137 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -from PIL import Image - -model_id = 'andite/pastel-mix' -prefix = '' - -scheduler = 
DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe_i2i = pipe_i2i.to("cuda") - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False): - - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - prompt = f"{prefix} {prompt}" if auto_prefix else prompt - - try: - if img is not None: - return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - -def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator): - - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe_i2i( - prompt, - negative_prompt = neg_prompt, - init_image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" -
-
-

Pastel Mix

-
-

- Demo for Pastel Mix Stable Diffusion model.
- {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""} -

- Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space

- Duplicate Space -
- """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - image_out = gr.Image(height=512) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=prefix, visible=prefix) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False) - - inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - gr.HTML(""" -
-
-

This space was created using SD Space Creator.

-
- """) - -demo.queue(concurrency_count=1) -demo.launch() diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest] Features and Benefits.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest] Features and Benefits.md deleted file mode 100644 index d1fcc7a50e6f01c91cd81f1f3883565f08c3ae29..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest] Features and Benefits.md +++ /dev/null @@ -1,80 +0,0 @@ - -

ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest]: A Comprehensive Review

-

If you are looking for a powerful and versatile photo editing software that can handle all your creative needs, you might want to check out ACDSee Photo Studio Ultimate 2020. This software is not only a digital asset manager and a RAW editor with layers, but also a full-featured photo editor that offers a wide range of tools and features to help you create stunning images.

-

ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest]


Download Zip ✔✔✔ https://byltly.com/2uKvQl



-

In this article, we will review ACDSee Photo Studio Ultimate 2020 in detail and show you what it can do for you. We will cover its main features, benefits, drawbacks, and how to get it for free with a crack. By the end of this article, you will have a clear idea of whether ACDSee Photo Studio Ultimate 2020 is the right software for you or not.

-

Introduction: What is ACDSee Photo Studio Ultimate 2020 and why you need it

-

ACDSee Photo Studio Ultimate 2020 is a software suite developed by ACD Systems, a company that has been in the business of digital imaging since 1994. It is the latest version of their flagship product, which combines several functions into one package.

-

ACDSee Photo Studio Ultimate 2020 is designed to answer your creative graphic and photography needs. It allows you to manage your photos from import to export, edit them with layers and filters, enhance them with adjustments and effects, organize them by faces and keywords, and share them online or offline.

-

ACDSee Photo Studio Ultimate 2020 is suitable for both beginners and professionals who want a fast and flexible solution for their photo editing projects. It supports over 500 camera models and formats, including RAW files. It also works seamlessly with other software like Photoshop and Lightroom.

-

In this article, we will explore ACDSee Photo Studio Ultimate 2020's features in depth and show you how they can help you improve your workflow and creativity.

-

Features: What are the main features of ACDSee Photo Studio Ultimate 2020 and how they can help you with your photo editing needs

-

ACDSee Photo Studio Ultimate 2020 has many features that make it stand out from other photo editing software. Here are some of the most important ones:

-

Face Detection & Facial Recognition

-

One of the most impressive features of ACDSee Photo Studio Ultimate 2020 is its face detection and facial recognition tool. This tool allows you to find and name the people in your photos automatically. You can also search your photos by unnamed, auto-named, or suggested names.

-

ACDSee Photo Studio Ultimate 2020 full version offline installer
-ACDSee Photo Studio Ultimate 2020 with facial recognition tool
-ACDSee Photo Studio Ultimate 2020 free download with crack
-ACDSee Photo Studio Ultimate 2020 color wheel feature
-ACDSee Photo Studio Ultimate 2020 review and tutorial
-ACDSee Photo Studio Ultimate 2020 vs Photoshop
-ACDSee Photo Studio Ultimate 2020 system requirements and technical details
-ACDSee Photo Studio Ultimate 2020 best price and discount
-ACDSee Photo Studio Ultimate 2020 layered photo editor
-ACDSee Photo Studio Ultimate 2020 RAW editor with layers
-ACDSee Photo Studio Ultimate 2020 digital asset manager
-ACDSee Photo Studio Ultimate 2020 non-destructive photo editing
-ACDSee Photo Studio Ultimate 2020 develop mode and presets
-ACDSee Photo Studio Ultimate 2020 import face data from Lightroom and Picasa
-ACDSee Photo Studio Ultimate 2020 support for GoPro.GPR file format
-How to install and activate ACDSee Photo Studio Ultimate 2020 with crack
-How to use ACDSee Photo Studio Ultimate 2020 for creative graphic and image composition
-How to optimize your workflow with ACDSee Photo Studio Ultimate 2020
-How to create stunning photo manipulations with ACDSee Photo Studio Ultimate 2020
-How to organize and manage your photos with ACDSee Photo Studio Ultimate 2020
-How to enhance your photos with ACDSee Photo Studio Ultimate 2020 adjustment layers
-How to apply filters and effects with ACDSee Photo Studio Ultimate 2020
-How to edit RAW images with ACDSee Photo Studio Ultimate 2020
-How to use the color wheel in ACDSee Photo Studio Ultimate 2020
-How to find and name faces in your photos with ACDSee Photo Studio Ultimate 2020
-How to compare ACDSee Photo Studio Ultimate 2020 with other photo editing software
-How to get the latest updates and features of ACDSee Photo Studio Ultimate 2020
-How to troubleshoot common issues with ACDSee Photo Studio Ultimate 2020
-How to backup and restore your photos with ACDSee Photo Studio Ultimate 2020
-How to share your photos online with ACDSee Photo Studio Ultimate 2020

-

This feature is very useful for organizing your photos by person or family member. You can also create smart collections based on faces or use face data as metadata for sorting or filtering. You can also edit faces individually or in batches with tools like skin tune, red-eye removal, blemish removal, teeth whitening, etc.

-

HDR & Focus Stacking

-

If you want to create stunning images with high dynamic range and depth of field, ACDSee Photo Studio Ultimate 2020's HDR and focus stacking tools are perfect for you. These tools allow you to combine multiple images with different exposures or focal distances into one image with maximum detail in shadows and highlights.

-

The HDR tool lets you merge a series of images with different exposures into one image that captures the full range of light in your scene. You can also adjust parameters like brightness, contrast, saturation, tone curve, etc. to fine-tune your HDR image.

-

The focus stacking tool lets you merge a series of images with different focal distances into one image that has a greater depth of field than a single exposure would allow. You can also adjust parameters like alignment, blending mode, radius, etc. to fine-tune your focus stacked image.
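For a concrete sense of what exposure merging involves under the hood, here is a minimal, hedged sketch using OpenCV's exposure-fusion API. It is not ACDSee's implementation (that code is proprietary), the file names are placeholders, and it only illustrates the general idea of blending a bracketed series into a single well-exposed image:

```python
# Illustrative sketch only: blend a bracketed exposure series with OpenCV's Mertens fusion.
# File names are hypothetical; ACDSee's own HDR engine is not shown or implied here.
import cv2
import numpy as np

# Load the same scene shot at different exposures (placeholder file names).
exposures = [cv2.imread(p) for p in ("scene_under.jpg", "scene_normal.jpg", "scene_over.jpg")]

# Align the frames first, since hand-held brackets rarely line up perfectly.
cv2.createAlignMTB().process(exposures, exposures)

# Mertens exposure fusion blends the frames without needing camera response curves.
fused = cv2.createMergeMertens().process(exposures)

# The result is float32 in [0, 1]; scale back to 8-bit before saving.
cv2.imwrite("scene_fused.jpg", np.clip(fused * 255.0, 0, 255).astype(np.uint8))
```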

-

Layered Editing

-

If you want to create complex compositions and manipulations, ACDSee Photo Studio Ultimate 2020's layered editing feature is ideal for you. This feature allows you to use layers, masks, filters, and effects to edit your images non-destructively.

-

You can add as many layers as you want to your image and apply any edit mode filter or adjustment to each layer individually. You can also blend layers with different modes like normal, multiply, screen, overlay, etc. You can also use masks to control which parts of your image are affected by each layer.
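To make the blend-mode idea concrete, here is the standard per-pixel math behind two of the modes mentioned above, multiply and screen, written as a small NumPy sketch. This is the textbook formula, not ACDSee's own code:

```python
# Textbook formulas for two common layer blend modes, for 8-bit images.
# Purely illustrative; not taken from ACDSee.
import numpy as np

def blend_multiply(base: np.ndarray, layer: np.ndarray) -> np.ndarray:
    # Multiply darkens: blending with white leaves the base unchanged, with black gives black.
    return (base.astype(np.float32) * layer.astype(np.float32) / 255.0).astype(np.uint8)

def blend_screen(base: np.ndarray, layer: np.ndarray) -> np.ndarray:
    # Screen lightens: blending with black leaves the base unchanged, with white gives white.
    return (255.0 - (255.0 - base.astype(np.float32)) * (255.0 - layer.astype(np.float32)) / 255.0).astype(np.uint8)
```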

-

You can also use text layers to add captions, titles, or watermarks to your image. You can customize font, size, color, style, justification, opacity, and more for each text layer. You can also add effects like inner glow, shadows, bevel, outline, or blur to your text layers.

-

Digital Asset Management

-

If you want to manage, sort, tag, and search your photos efficiently, ACDSee Photo Studio Ultimate 2020's digital asset management feature is essential for you. This feature allows you to import, export, browse, organize, and back up your photos easily.

That said, the software also has some drawbacks, such as occasional issues, a learning curve, bugs, and updates.

-
  • How to get ACDSee Photo Studio Ultimate 2020 for free with a crack?
  • -

    A: To get ACDSee Photo Studio Ultimate 2020 for free with a crack, you need to download the software from a reliable source and apply the crack file to activate the full version of the software. You can follow these steps:

    -
      -
    1. Download ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest] from the link below.
    2. -
    3. Extract the downloaded file with WinRAR or any other file extractor.
    4. -
    5. Run the setup file and follow the installation instructions.
    6. -
    7. Do not run the software after installation.
    8. -
    9. Copy the crack file from the crack folder and paste it into the installation directory of the software.
    10. -
    11. Run the software and enjoy the full version of ACDSee Photo Studio Ultimate 2020 for free.
    12. -
    -
  • Is ACDSee Photo Studio Ultimate 2020 safe to use?
  • -

    A: ACDSee Photo Studio Ultimate 2020 is safe to use if you download it from a trusted source and scan it with a reputable antivirus program before installing it. However, using a cracked version of the software may pose some risks such as malware infection, data loss, legal issues, or performance issues. Therefore, we recommend that you use ACDSee Photo Studio Ultimate 2020 at your own risk and discretion.

    -
  • What are some alternatives to ACDSee Photo Studio Ultimate 2020?
  • -

    A: Some alternatives to ACDSee Photo Studio Ultimate 2020 are:

    - - -

    I hope you enjoyed this article and learned something new about ACDSee Photo Studio Ultimate 2020. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Stronghold Crusader Trainer v1.0.0.1 for Free and Dominate the Game.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Stronghold Crusader Trainer v1.0.0.1 for Free and Dominate the Game.md deleted file mode 100644 index 3ff4c03959807f1548fd87ed3c2d4b9cee95bd62..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Stronghold Crusader Trainer v1.0.0.1 for Free and Dominate the Game.md +++ /dev/null @@ -1,143 +0,0 @@ -
    -

    Stronghold Crusader Trainer V1.0.0.1: A Guide to Download and Use

    -

    If you are a fan of strategy games, you might have heard of Stronghold Crusader, a popular medieval-themed game that lets you build and defend your own castle against various enemies. The game offers a lot of challenges and fun, but it can also be quite difficult and frustrating at times.

    -

    stronghold crusader trainer v1.0.0.1 free download


    Download File ○○○ https://byltly.com/2uKzBP



    -

    That's why some players use trainers, which are programs that modify the game's code and give you access to various cheats and hacks that can make the game easier or more enjoyable.

    -

    In this article, we will show you how to download and use one of the best trainers for Stronghold Crusader, which is Stronghold Crusader Trainer V1.0.0.1. This trainer has many features and options that can help you conquer your enemies and build your dream castle.

    -

    So, if you are interested in learning more about this trainer, keep reading!

    -

    What is Stronghold Crusader?

    -

    Stronghold Crusader is a real-time strategy game developed by Firefly Studios and released in 2002. It is a sequel to Stronghold, which was released in 2001.

    -

    The game is set in the Middle East during the Crusades, where you can play as either a European lord or an Arabian sultan. You can choose from four historical campaigns, each with different missions and objectives.

    -

    The game also has a skirmish mode, where you can play against up to seven computer-controlled opponents or other players online.

    -

    The main goal of the game is to build a strong castle that can withstand attacks from your enemies, while also producing resources, recruiting troops, and expanding your territory.

    -

    The game has many features that make it realistic and immersive, such as weather effects, day-night cycle, fire propagation, siege engines, historical characters, and different types of units.

    -

    stronghold crusader hd trainer mrantifun
    -stronghold crusader plus 27 trainer deviance
    -stronghold crusader unlimited gold and resources cheat
    -stronghold crusader extreme cheat table
    -stronghold crusader trainer for steam version
    -stronghold crusader god mode trainer
    -stronghold crusader trainer setup.exe
    -stronghold crusader trainer with popularity cheat
    -stronghold crusader hd v1.0.1 trainer +2
    -stronghold crusader trainer for windows 10
    -stronghold crusader trainer for version 1.3
    -stronghold crusader hd and extreme latest version trainer
    -stronghold crusader trainer with invincible units
    -stronghold crusader trainer with instakill cheat
    -stronghold crusader hd (steam) 9-1-20 trainer +4
    -stronghold crusader trainer with unlimited chickens
    -stronghold crusader trainer with freeze time cheat
    -stronghold crusader trainer with stop increasing population cheat
    -stronghold crusader hd v1.0.0.1 free download full version
    -stronghold crusader hd v1.0.0.1 free download for pc
    -stronghold crusader hd v1.0.0.1 free download mega
    -stronghold crusader hd v1.0.0.1 free download torrent
    -stronghold crusader hd v1.0.0.1 free download crack
    -stronghold crusader hd v1.0.0.1 free download skidrow
    -stronghold crusader hd v1.0.0.1 free download ocean of games
    -how to install stronghold crusader hd v1.0.0.1 free download
    -how to use stronghold crusader hd v1.0.0.1 free download trainer
    -how to update stronghold crusader hd v1.0.0.1 free download
    -how to play multiplayer on stronghold crusader hd v1.0.0.1 free download
    -how to fix bugs on stronghold crusader hd v1.0.0.1 free download
    -best mods for stronghold crusader hd v1.0.0.1 free download
    -best maps for stronghold crusader hd v1.0.0.1 free download
    -best tips and tricks for stronghold crusader hd v1.0.0.1 free download
    -best cheats and hacks for stronghold crusader hd v1.0.0.1 free download
    -best strategies and guides for stronghold crusader hd v1.0.0.1 free download
    -review of stronghold crusader hd v1.0.0.1 free download
    -gameplay of stronghold crusader hd v1.0.0.1 free download
    -walkthrough of stronghold crusader hd v1.0.0.1 free download
    -comparison of stronghold crusader hd v1.0.0.1 free download and original game
    -comparison of stronghold crusader hd v1.0.0.1 free download and extreme edition

    -

    What is a trainer and why do you need it?

    -

    A trainer is a program that modifies the game's code and gives you access to various cheats and hacks that can alter the game's behavior.

    -

    Some of the common cheats and hacks that trainers offer are:

    - -

    You might need a trainer for various reasons:

    - -

    How to download Stronghold Crusader Trainer V1.0.0.1?

    -

    If you want to download Stronghold Crusader Trainer V1.0.0.1, you need to follow these steps:

    -
      -
    1. Go to this link, which is one of the reliable sources for trainers on the internet.
    2. -
    3. Scroll down until you see the attachment section with two files: Stronghold Crusader HD V1.0.1 Trainer +2 MrAntiFun.zip and Stronghold Crusader HD (Steam) Trainer Setup.exe.
    4. -
    5. If you have the Steam version of the game, download the second file; otherwise download the first file.
    6. -
    7. Save the file on your computer in a folder where you can easily find it later.
    8. -
    -

    How to install and run Stronghold Crusader Trainer V1.0.0.1?

    -

    If you have downloaded Stronghold Crusader Trainer V1.0.0.1 successfully, you need to follow these steps:

    -
      -
    1. If you have downloaded the zip file, extract it using a program like WinRAR or 7-Zip.
    2. -
    3. If you have downloaded the exe file, run it and follow the instructions.
    4. -
    5. Copy the trainer file (Crusader10Trn.exe) to the folder where you have installed Stronghold Crusader.
    6. -
    7. Run the trainer file before you run the game.
    8. -
    9. You should see a small window with the trainer's options and hotkeys.
    10. -
    11. Run the game and enjoy the cheats!
    12. -
    -

    What are the features and options of Stronghold Crusader Trainer V1.0.0.1?

    -

    Stronghold Crusader Trainer V1.0.0.1 has many features and options that can make your game easier or more fun. Here is a list and description of them:

    - - - - - - - - - -
| Feature | Description | Hotkey |
| --- | --- | --- |
| Unlimited resources | You can have unlimited amounts of food, wood, stone, iron, pitch, wheat, bread, cheese, meat, apples, beer, flour, bows, crossbows, spears, pikes, maces, swords, leather armor, metal armor, gold, etc. | Q - Y |
| Unlimited population | You can have unlimited number of peasants and soldiers in your castle. | F6 |
| Unlimited health | You can make your units invincible or heal them instantly. | F7 |
| Stop time | You can pause or speed up the game's clock. | M |
| Happy residents | You can make your peasants happy or unhappy. | N |
| God mode | You can make your units have superpowers or abilities. | F8 |
| Power bar to the max | You can fill up your power bar to use special abilities in Stronghold Crusader Extreme. | F9 |
    -

    How to use Stronghold Crusader Trainer V1.0.0.1 effectively?

    -

    If you want to use Stronghold Crusader Trainer V1.0.0.1 effectively, you need to follow these tips and tricks:

    - -

    Conclusion

    -

    In conclusion, Stronghold Crusader Trainer V1.0.0.1 is a great program that can enhance your gaming experience with Stronghold Crusader. It can give you access to various cheats and hacks that can help you overcome challenges and have more fun.

    -

    If you want to download and use this trainer, you need to follow our guide carefully and make sure you get it from a reliable source. You also need to be careful and responsible when using it and avoid any problems or issues that might arise.

    -

    We hope you found this article helpful and informative. If you did, please share it with your friends and fellow gamers who might be interested in this trainer as well.

    -

    Thank you for reading and happy gaming!

    -

    Frequently Asked Questions

    -
      -
    1. Does this trainer work with other versions of Stronghold Crusader?
    2. -

      No, this trainer only works with version 1.0 of Stronghold Crusader (English). If you have a different version of the game, you need to find a different trainer that is compatible with it.

      -
    3. Does this trainer work with Stronghold Crusader HD or Extreme?
    4. -

      No, this trainer only works with Stronghold Crusader (original). If you have Stronghold Crusader HD or Extreme, you need to find a different trainer that is compatible with them.

      -
    5. Does this trainer work with mods or custom maps?
    6. -

      No, this trainer only works with the vanilla game (original). If you have any mods or custom maps installed, you need to disable them before using this trainer.

      -
    7. Is this trainer safe and virus-free?
    8. -

      We cannot guarantee that this trainer is 100% safe and virus-free, as we did not create it ourselves. However, we did scan it with several antivirus programs and found no threats or malware in it. Use it at your own risk and discretion.

      -
    9. Where can I get more trainers for Stronghold Crusader?
    10. -


      If you want more trainers for Stronghold Crusader, you can check out these sources:

      - -

      0a6ba089eb
      -
      -
      \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/8211759.9137094 Gt E2250 Flash Loader 7.5.4 Csc V0.4 Lite.md b/spaces/1gistliPinn/ChatGPT4/Examples/8211759.9137094 Gt E2250 Flash Loader 7.5.4 Csc V0.4 Lite.md deleted file mode 100644 index 399a20e2172cde276fc6b88d35cda80112560b60..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/8211759.9137094 Gt E2250 Flash Loader 7.5.4 Csc V0.4 Lite.md +++ /dev/null @@ -1,8 +0,0 @@ -

      8211759.9137094 Gt E2250 Flash Loader 7.5.4 Csc V0.4 Lite


      DOWNLOAD ––– https://imgfil.com/2uxZKR



      - -8211759.9137094 Gt E2250 Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 -Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader -7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 8a78ff9644
      -
      -
      -

      diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Atoll Rf Planning Tool Cracked __TOP__.md b/spaces/1gistliPinn/ChatGPT4/Examples/Atoll Rf Planning Tool Cracked __TOP__.md deleted file mode 100644 index 5def6a26a74222cd37c224ba68c2c1870955666a..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Atoll Rf Planning Tool Cracked __TOP__.md +++ /dev/null @@ -1,24 +0,0 @@ -

      Atoll Rf Planning Tool Cracked


      Download Filehttps://imgfil.com/2uxZ7N



      -
      -die, Jr. is on april 5, 1967. other key dates include april 1, 1975, in which a new building permit is issued, and october 1, 1978, when the mia zina rf sound system is operating. office hours for building, engineering and design services are 8 a.m. to 4:30 p.m., thursday, friday and saturday. phone (702) 224-6801. _______________________________________________ - -NEW NAME FOR OFFICE IN VILLAGE LIGHT PROPERTIES - -email to: service@vllp.net - -7/5/01 - -Village Light Properties has a new name. - -from the desk of Dr. Linda Anderson - -Let’s just call her Linda, shall we? - -At a recent office staff meeting the Chief Executive Officer of the newly named non-profit Community Living Center of West Miami announced that the BISCUIT HOUSE, the non-profit facility which provides needed services and affordable housing for the homeless for over 25 years, will henceforth be known as the VILLAGE LIGHT PROPERTIES CERTIFIED INNOVATIVE RESIDENCE. For those unfamiliar with Village Light Properties, it is a non-profit Florida corporation that for over two decades has designed and built a series of elegant, state-of-the-art, affordable housing projects which incorporate sustainable practices into their design, construction and operation. Village Light Properties is dedicated to the idea of creating safe, affordable homes for homeless and at-risk residents of Miami-Dade County who have traditionally been ignored by the private and public housing sectors. - -Village Light Properties is a pioneer in the production of rental housing in the Suncoast. Founded in 1984, Village Light Properties has established a reputation for attracting innovative private capital and utilizing the highest quality design in the industry. Villages are consistently recognized as the premier destination for innovative housing in the Suncoast. The unique model Village Light Properties has developed has enabled them to move with the times and continue to provide housing for at-risk, homeless and special needs residents while simultaneously offering affordable housing at below market rents. - -In 2000, an assessment of our financial status identified that we were in dire need of an infusion of capital. Our deteriorating financial status was due to the poor quality of our financing and the fact that we were a non-profit, or at least a non-profit with not as many advantages as a for- 4fefd39f24
      -
      -
      -

      diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/CES Edupack 2013.rar.md b/spaces/1gistliPinn/ChatGPT4/Examples/CES Edupack 2013.rar.md deleted file mode 100644 index 260b6b521029b4fae3486de97d04bc8ba1fc3370..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/CES Edupack 2013.rar.md +++ /dev/null @@ -1,31 +0,0 @@ -
      -

      How to Download and Install CES EduPack 2013

      -

      CES EduPack is a software tool that helps students learn about materials and engineering design. It provides a comprehensive database of materials properties, interactive charts and graphs, case studies and exercises, and a range of teaching resources. CES EduPack is used by over 1000 universities and colleges worldwide to support courses in engineering, materials science, design, manufacturing, sustainability, and more.

      -

      CES edupack 2013.rar


      Download Ziphttps://imgfil.com/2uxZ16



      -

      In this article, we will show you how to download and install CES EduPack 2013 on your computer. CES EduPack 2013 is an older version of the software that is no longer supported by Ansys, the company that develops and distributes it. However, some instructors may still prefer to use this version for their courses. If you are looking for the latest version of CES EduPack, please visit Ansys Granta EduPack.

      -

      Step 1: Download CES EduPack 2013

      -

      The first step is to download the CES EduPack 2013 installation file from a reliable source. One such source is 4shared, a file sharing service that hosts various files uploaded by users. To download CES EduPack 2013 from 4shared, follow these steps:

      - -

      Step 2: Extract CES EduPack 2013

      -

      The second step is to extract the CES EduPack 2013 installation file from the compressed archive. A compressed archive is a file that contains one or more files that are reduced in size to save space and bandwidth. To extract CES EduPack 2013 from the compressed archive, you will need a software program that can handle .rar files, such as WinRAR or 7-Zip. To extract CES EduPack 2013 using WinRAR, follow these steps:

      - -

      Step 3: Install CES EduPack 2013

      -

      The third and final step is to install CES EduPack 2013 on your computer. To install CES EduPack 2013, follow these steps:

      -

      - -

      Congratulations! You have successfully downloaded and installed CES EduPack 2013 on your computer. You can now use it to explore materials and engineering design concepts in your courses.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 MOD APK Download - Get Unlimited Money and Unlock All Cars.md b/spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 MOD APK Download - Get Unlimited Money and Unlock All Cars.md deleted file mode 100644 index 84412decfa09b593559bd073b73f0b8ff8c2bdb2..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 MOD APK Download - Get Unlimited Money and Unlock All Cars.md +++ /dev/null @@ -1,199 +0,0 @@ - -

      Download Beach Buggy Racing 2 Mod APK Revdl

      -

      Do you love kart racing games? Do you want to experience a thrilling and colorful adventure on a tropical island? Do you want to unlock and upgrade dozens of cars and power-ups? If you answered yes to any of these questions, then you should download Beach Buggy Racing 2 Mod APK Revdl. This is a modded version of the popular racing game Beach Buggy Racing 2 that gives you unlimited money, unlocked cars, power-ups, and more. In this article, we will tell you everything you need to know about this amazing game, how to download it from Revdl, how to play it, what are the best cars and power-ups, and what are some reviews and ratings from other players. So buckle up and get ready for some fun!

      -

      download beach buggy racing 2 mod apk revdl


      Download Ziphttps://jinyurl.com/2uNJQY



      -

      What is Beach Buggy Racing 2?

      -

      Beach Buggy Racing 2 is a 3D kart racing game developed by Vector Unit. It is the sequel to Beach Buggy Racing, which was released in 2014. The game features a variety of tracks, cars, characters, power-ups, and game modes. You can race against other players online or offline, explore a mysterious island full of secrets and surprises, compete in championships and tournaments, or create your own custom races with your own rules. The game has stunning graphics, realistic physics, catchy music, and a lighthearted atmosphere. It is suitable for all ages and skill levels.

      -

      Why download Beach Buggy Racing 2 Mod APK Revdl?

      -

      Beach Buggy Racing 2 is free to play on Android devices. However, it also contains in-app purchases that require real money. These include coins, gems, tickets, cars, power-ups, and more by winning races and ranking high in the leaderboards. You can also use tickets to enter special events that offer exclusive rewards. Championship Mode is updated regularly with new tournaments and challenges.

      -

      Race Mode

      -

      Race Mode is the classic mode where you race against other players or AI opponents on different tracks. You can choose from various settings, such as the number of laps, the difficulty level, the power-up deck, and the car type. You can also invite your friends to join you in a private race or join a public race with random players. Race Mode is a great way to test your skills and have fun with others.

      -

      Drift Attack Mode

      -

      Drift Attack Mode is the skill-based mode where you perform drifts and powerslides to earn points and bonuses. You can choose from different tracks and cars that suit your drifting style. You can also use power-ups to boost your speed, score, or time. Drift Attack Mode is a challenging and rewarding mode that requires precision and timing.

      -

      Custom Mode

      -

      Custom Mode is the creative mode where you can customize and save your own race rules and power-up decks. You can mix and match different settings, such as the track, the car, the power-ups, the laps, the difficulty, and more. You can also name and share your custom races with other players or play them yourself. Custom Mode is a fun and unique mode that lets you create your own racing experience.

      -

      What are the best cars and power-ups in Beach Buggy Racing 2?

      -

      Beach Buggy Racing 2 has a lot of cars and power-ups to choose from, each with their own stats, abilities, and effects. Some of them are better than others, depending on your preference and strategy. Here are some of the best cars and power-ups in Beach Buggy Racing 2:

      -

      Cars

      -

      The following table shows the name, image, type, speed, acceleration, handling, and special ability of each car in the game:

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -4/5 - - - - - - - - - - - - - - - - - - - - - - - - -Offensive - - - - - - - - - - - - - - - - - - - - -Drops a mine behind the car that explodes when an opponent touches it.1: Single mine.
      2: Double mine.
      3: Triple mine. - -Banana PeelDefensiveDrops a banana peel behind the car that makes opponents slip and spin out.1: Single peel.
      2: Double peel.
      3: Triple peel. - -Frost BiteOffensive - - - - - - -Attracts coins and power-ups to the car.1: Short duration.
      2: Medium duration.
      3: Long duration. - -FireworkOffensiveFires a firework that flies in a random direction and explodes on impact.1: Single firework.
      2: Double firework.
      3: Triple firework. - -Coin ShieldDefensiveCreates a shield of coins around the car that protects it from damage and power-ups.1: Small shield.
      2: Medium shield.
      3: Large shield. - -BombOffensive - - - -
      NameImageTypeSpeedAccelerationHandlingSpecial Ability
      LambiniLambiniSport5/54/54/5Nitro Boost: Increases speed for a short time.
      Baja BanditoBaja BanditoBuggy4/54/55/5Baja Blast: Launches a shockwave that knocks back nearby opponents.
      Rocket BoatRocket BoatBoat4/55/53/5Rocket Boost: Fires a rocket that propels the car forward.
      Sandstorm GTSandstorm GTMuscle5/53/5Sandstorm: Creates a sandstorm that obscures the vision of opponents behind.
      Lightning GTLightning GTElectric4/54/54/5Lightning Strike: Zaps nearby opponents with a bolt of electricity.
      Monster BusMonster BusMonster3/53/53/5Monster Crush: Crushes opponents under its huge wheels.
      Firework TruckFirework TruckTruck3/5Increases the speed of the car for a short time.1: Small boost.
      2: Medium boost.
      3: Large boost.
      Spring TrapSpring TrapDefensiveDrops a spring trap behind the car that launches opponents into the air.1: Single spring.
      2: Double spring.
      3: Triple spring.
      Lightning ZapLightning ZapOffensiveZaps the opponent in front of the car with a bolt of lightning that damages and slows them down.1: Single zap.
      2: Double zap.
      3: Triple zap.
      Mine DropMine DropDefensive
      Banana Peel
      Frost BiteFreezes the opponent in front of the car with a blast of ice that damages and stops them.1: Single freeze.
      2: Double freeze.
      3: Triple freeze.
      MagnetMagnetDefensive
      Firework
      Coin Shield
      BombThrows a bomb that explodes after a few seconds and damages nearby opponents.1: Small bomb.
      2: Medium bomb.
      3: Large bomb.
      -

      As you can see, there are many power-ups to choose from, each with their own effects and upgrades. You can try them all and find the ones that suit your style and strategy. You can also mix and match them to create your own power-up deck.

      -

      download beach buggy racing 2 mod apk unlimited money
      -download beach buggy racing 2 mod apk latest version
      -download beach buggy racing 2 mod apk android 1
      -download beach buggy racing 2 mod apk rexdl
      -download beach buggy racing 2 mod apk offline
      -download beach buggy racing 2 mod apk hack
      -download beach buggy racing 2 mod apk free shopping
      -download beach buggy racing 2 mod apk for pc
      -download beach buggy racing 2 mod apk obb
      -download beach buggy racing 2 mod apk data
      -download beach buggy racing 2 mod apk pure
      -download beach buggy racing 2 mod apk happymod
      -download beach buggy racing 2 mod apk no ads
      -download beach buggy racing 2 mod apk all cars unlocked
      -download beach buggy racing 2 mod apk andropalace
      -download beach buggy racing 2 mod apk apkpure
      -download beach buggy racing 2 mod apk an1
      -download beach buggy racing 2 mod apk android oyun club
      -download beach buggy racing 2 mod apk by revdl
      -download beach buggy racing 2 mod apk blackmod
      -download beach buggy racing 2 mod apk bluestacks
      -download beach buggy racing 2 mod apk bestmodapk.com
      -download beach buggy racing 2 mod apk cheat
      -download beach buggy racing 2 mod apk coins and gems
      -download beach buggy racing 2 mod apk cracked
      -download beach buggy racing 2 mod apk clubapk.com
      -download beach buggy racing 2 mod apk diamond
      -download beach buggy racing 2 mod apk direct link
      -download beach buggy racing 2 mod apk dlandroid.com
      -download beach buggy racing 2 mod apk easydownloadz.com
      -download beach buggy racing 2 mod apk everything unlocked
      -download beach buggy racing 2 mod apk full version
      -download beach buggy racing 2 mod apk file
      -download beach buggy racing 2 mod apk from apkmody.io
      -download beach buggy racing 2 mod apk gamestechy.com
      -download beach buggy racing 2 mod apk gems and coins generator online tool hack cheat unlimited resources free no survey no human verification no password no jailbreak no root required android ios pc windows mac xbox ps4 switch nintendo device mobile phone tablet laptop desktop computer smart tv console gaming system handheld device emulator simulator controller joystick keyboard mouse touch screen vr headset ar glasses wearable device smart watch fitness tracker bluetooth speaker wireless earbuds headphones microphone webcam camera projector printer scanner fax machine copier shredder laminator calculator calendar clock timer stopwatch alarm reminder note memo voice recorder music player video player podcast player radio player streaming player media player dvd player blu-ray player cd player cassette player vinyl record player mp3 player mp4 player flac player wav player ogg player wma player aac player m4a player midi player karaoke machine jukebox boombox stereo system surround sound system home theater system sound bar subwoofer speaker amplifier equalizer mixer turntable dj controller guitar hero rock band dance dance revolution just dance singstar guitar tuner metronome piano keyboard synthesizer drum machine sampler sequencer beat maker loop station vocoder autotune pitch correction noise cancellation noise reduction noise gate compressor limiter reverb delay echo chorus flanger phaser tremolo vibrato distortion overdrive fuzz wah-wah pedal volume pedal expression pedal sustain pedal octave pedal harmonizer pedal looper pedal multi-effects pedal guitar amp bass amp acoustic amp keyboard amp drum amp pa system microphone stand pop filter shock mount windscreen cable adapter splitter converter connector jack plug socket outlet power strip surge protector extension cord battery charger power bank solar panel generator flashlight lantern torch lamp light bulb led cfl halogen incandescent fluorescent neon laser infrared ultraviolet x-ray gamma ray radio wave microwave radar sonar lidar ultrasound echolocation doppler effect sound wave frequency wavelength amplitude modulation demodulation encoding decoding encryption decryption compression decompression zip rar tar gz bz2 iso dmg exe msi bat sh cmd ps1 py rb js php html css xml json csv tsv sql db mdb accdb xls xlsx ppt pptx doc docx pdf txt rtf odt ods odp epub mobi azw3 djvu cbz cbr epub mobi azw3 djvu cbz cbr jpg jpeg png gif bmp tiff webp svg eps psd ai cdr dwg dxf stl obj fbx gltf usdz dae ply pcd pnm pgm ppm pbm pcx xbm xpm dds tga hdr exr raw nef crw cr2 arw dng heic heif webm mp4 mov avi mkv flv wmv mpg mpeg vob mts m4v mxf asf rm rmvb swf ogv f4v f4

      -

      What are some reviews and ratings of Beach Buggy Racing 2?

      -

      Beach Buggy Racing 2 is a highly rated and well-reviewed game by players and critics alike. It has over 50 million downloads and 4.4 stars out of 5 on Google Play Store. It also has positive feedback on other platforms, such as App Store, Amazon, and Steam. Here are some of the reviews and ratings of Beach Buggy Racing 2:

      -

      Positive Reviews

      -

      Here are some of the quotes from players and critics who have praised the game for its graphics, gameplay, variety, and fun factor:

      - -

      Negative Reviews

      -

      Here are some of the quotes from players and critics who have criticized the game for its ads, bugs, difficulty, and repetition:

      - -

      Conclusion

      -

      Beach Buggy Racing 2 is a fun and exciting kart racing game that offers a lot of features and content for players of all ages and skill levels. You can download Beach Buggy Racing 2 Mod APK Revdl to enjoy the game without any limitations or restrictions. You can unlock and upgrade all the cars and power-ups in the game, and use them to race against other players or AI opponents on different tracks and modes. You can also explore a mysterious island full of secrets and surprises, compete in championships and tournaments, or create your own custom races with your own rules. Beach Buggy Racing 2 is a game that will keep you entertained for hours with its graphics, gameplay, variety, and fun factor. If you are looking for a kart racing game that has it all, then you should download Beach Buggy Racing 2 Mod APK Revdl today!

      -

      FAQs

      -

      Here are some of the frequently asked questions about Beach Buggy Racing 2 and Beach Buggy Racing 2 Mod APK Revdl:

      -
        -
      1. Is Beach Buggy Racing 2 Mod APK Revdl safe to download and install?
      2. -

        Yes, Beach Buggy Racing 2 Mod APK Revdl is safe to download and install. It does not contain any viruses, malware, or spyware that can harm your device or data. However, you should always download it from a trusted and reliable source, such as Revdl. You should also enable the option to install apps from unknown sources on your device settings before installing it.

        -
      3. Is Beach Buggy Racing 2 Mod APK Revdl compatible with my device?
      4. -

        Beach Buggy Racing 2 Mod APK Revdl is compatible with most Android devices that have Android 4.4 or higher. However, some devices may have different specifications or performance issues that may affect the gameplay. You should check the minimum requirements and compatibility of the game before downloading and installing it.

        -
      5. How can I update Beach Buggy Racing 2 Mod APK Revdl?
      6. -

        Beach Buggy Racing 2 Mod APK Revdl is updated regularly with new features and content. You can check the latest version of the game on the Revdl website or on the game's official social media pages. You can also enable the option to auto-update apps on your device settings. However, you may need to uninstall and reinstall the modded version of the game every time there is a new update.

        -
      7. How can I contact the developers of Beach Buggy Racing 2?
      8. -

        If you have any questions, feedback, suggestions, or issues regarding Beach Buggy Racing 2, you can contact the developers of the game by email at support@vectorunit.com. You can also visit their website at https://www.vectorunit.com/ or follow them on Facebook, Twitter, Instagram, YouTube, or Discord.

        -
      9. How can I support the developers of Beach Buggy Racing 2?
      10. -

        If you enjoy playing Beach Buggy Racing 2 and want to support the developers of the game, you can do so by purchasing coins, gems, tickets, cars, power-ups, and more in the game. You can also rate and review the game on Google Play Store or other platforms, share it with your friends and family, or follow them on social media.

        -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/4Taps/SadTalker/src/facerender/modules/dense_motion.py b/spaces/4Taps/SadTalker/src/facerender/modules/dense_motion.py deleted file mode 100644 index 30c13060be8e82979771514b4ec51e5de23f49fa..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/facerender/modules/dense_motion.py +++ /dev/null @@ -1,117 +0,0 @@ -from torch import nn -import torch.nn.functional as F -import torch -from src.facerender.modules.util import Hourglass, make_coordinate_grid, kp2gaussian - -from src.facerender.sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d - - -class DenseMotionNetwork(nn.Module): - """ - Module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving - """ - - def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress, - estimate_occlusion_map=False): - super(DenseMotionNetwork, self).__init__() - # self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(feature_channel+1), max_features=max_features, num_blocks=num_blocks) - self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks) - - self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3) - - self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1) - self.norm = BatchNorm3d(compress, affine=True) - - if estimate_occlusion_map: - # self.occlusion = nn.Conv2d(reshape_channel*reshape_depth, 1, kernel_size=7, padding=3) - self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3) - else: - self.occlusion = None - - self.num_kp = num_kp - - - def create_sparse_motions(self, feature, kp_driving, kp_source): - bs, _, d, h, w = feature.shape - identity_grid = make_coordinate_grid((d, h, w), type=kp_source['value'].type()) - identity_grid = identity_grid.view(1, 1, d, h, w, 3) - coordinate_grid = identity_grid - kp_driving['value'].view(bs, self.num_kp, 1, 1, 1, 3) - - # if 'jacobian' in kp_driving: - if 'jacobian' in kp_driving and kp_driving['jacobian'] is not None: - jacobian = torch.matmul(kp_source['jacobian'], torch.inverse(kp_driving['jacobian'])) - jacobian = jacobian.unsqueeze(-3).unsqueeze(-3).unsqueeze(-3) - jacobian = jacobian.repeat(1, 1, d, h, w, 1, 1) - coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1)) - coordinate_grid = coordinate_grid.squeeze(-1) - - - driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.num_kp, 1, 1, 1, 3) # (bs, num_kp, d, h, w, 3) - - #adding background feature - identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1) - sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1) #bs num_kp+1 d h w 3 - - # sparse_motions = driving_to_source - - return sparse_motions - - def create_deformed_feature(self, feature, sparse_motions): - bs, _, d, h, w = feature.shape - feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1) # (bs, num_kp+1, 1, c, d, h, w) - feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w) # (bs*(num_kp+1), c, d, h, w) - sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1)) # (bs*(num_kp+1), d, h, w, 3) !!!! 
- sparse_deformed = F.grid_sample(feature_repeat, sparse_motions) - sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w)) # (bs, num_kp+1, c, d, h, w) - return sparse_deformed - - def create_heatmap_representations(self, feature, kp_driving, kp_source): - spatial_size = feature.shape[3:] - gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01) - gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01) - heatmap = gaussian_driving - gaussian_source - - # adding background feature - zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type()) - heatmap = torch.cat([zeros, heatmap], dim=1) - heatmap = heatmap.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w) - return heatmap - - def forward(self, feature, kp_driving, kp_source): - bs, _, d, h, w = feature.shape - - feature = self.compress(feature) - feature = self.norm(feature) - feature = F.relu(feature) - - out_dict = dict() - sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source) - deformed_feature = self.create_deformed_feature(feature, sparse_motion) - - heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source) - - input_ = torch.cat([heatmap, deformed_feature], dim=2) - input_ = input_.view(bs, -1, d, h, w) - - # input = deformed_feature.view(bs, -1, d, h, w) # (bs, num_kp+1 * c, d, h, w) - - prediction = self.hourglass(input_) - - - mask = self.mask(prediction) - mask = F.softmax(mask, dim=1) - out_dict['mask'] = mask - mask = mask.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w) - sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4) # (bs, num_kp+1, 3, d, h, w) - deformation = (sparse_motion * mask).sum(dim=1) # (bs, 3, d, h, w) - deformation = deformation.permute(0, 2, 3, 4, 1) # (bs, d, h, w, 3) - - out_dict['deformation'] = deformation - - if self.occlusion: - bs, c, d, h, w = prediction.shape - prediction = prediction.view(bs, -1, h, w) - occlusion_map = torch.sigmoid(self.occlusion(prediction)) - out_dict['occlusion_map'] = occlusion_map - - return out_dict diff --git a/spaces/52Hz/SRMNet_real_world_denoising/model/SRMNet.py b/spaces/52Hz/SRMNet_real_world_denoising/model/SRMNet.py deleted file mode 100644 index 5e6aa50258f82a9cc14aec6b787c1f0493a39828..0000000000000000000000000000000000000000 --- a/spaces/52Hz/SRMNet_real_world_denoising/model/SRMNet.py +++ /dev/null @@ -1,227 +0,0 @@ -import torch -import torch.nn as nn - -##---------- Basic Layers ---------- -def conv3x3(in_chn, out_chn, bias=True): - layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias) - return layer - -def conv(in_channels, out_channels, kernel_size, bias=False, stride=1): - return nn.Conv2d( - in_channels, out_channels, kernel_size, - padding=(kernel_size // 2), bias=bias, stride=stride) - -def bili_resize(factor): - return nn.Upsample(scale_factor=factor, mode='bilinear', align_corners=False) - -##---------- Basic Blocks ---------- -class UNetConvBlock(nn.Module): - def __init__(self, in_size, out_size, downsample): - super(UNetConvBlock, self).__init__() - self.downsample = downsample - self.block = SK_RDB(in_channels=in_size, growth_rate=out_size, num_layers=3) - if downsample: - self.downsample = PS_down(out_size, out_size, downscale=2) - - def forward(self, x): - out = self.block(x) - if self.downsample: - out_down = self.downsample(out) - return out_down, out - else: - return out - -class UNetUpBlock(nn.Module): - def __init__(self, in_size, out_size): - 
super(UNetUpBlock, self).__init__() - # self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True) - self.up = PS_up(in_size, out_size, upscale=2) - self.conv_block = UNetConvBlock(in_size, out_size, False) - - def forward(self, x, bridge): - up = self.up(x) - out = torch.cat([up, bridge], dim=1) - out = self.conv_block(out) - return out - -##---------- Resizing Modules (Pixel(Un)Shuffle) ---------- -class PS_down(nn.Module): - def __init__(self, in_size, out_size, downscale): - super(PS_down, self).__init__() - self.UnPS = nn.PixelUnshuffle(downscale) - self.conv1 = nn.Conv2d((downscale**2) * in_size, out_size, 1, 1, 0) - - def forward(self, x): - x = self.UnPS(x) # h/2, w/2, 4*c - x = self.conv1(x) - return x - -class PS_up(nn.Module): - def __init__(self, in_size, out_size, upscale): - super(PS_up, self).__init__() - - self.PS = nn.PixelShuffle(upscale) - self.conv1 = nn.Conv2d(in_size//(upscale**2), out_size, 1, 1, 0) - - def forward(self, x): - x = self.PS(x) # h/2, w/2, 4*c - x = self.conv1(x) - return x - -##---------- Selective Kernel Feature Fusion (SKFF) ---------- -class SKFF(nn.Module): - def __init__(self, in_channels, height=3, reduction=8, bias=False): - super(SKFF, self).__init__() - - self.height = height - d = max(int(in_channels / reduction), 4) - - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.conv_du = nn.Sequential(nn.Conv2d(in_channels, d, 1, padding=0, bias=bias), nn.PReLU()) - - self.fcs = nn.ModuleList([]) - for i in range(self.height): - self.fcs.append(nn.Conv2d(d, in_channels, kernel_size=1, stride=1, bias=bias)) - - self.softmax = nn.Softmax(dim=1) - - def forward(self, inp_feats): - batch_size, n_feats, H, W = inp_feats[1].shape - - inp_feats = torch.cat(inp_feats, dim=1) - inp_feats = inp_feats.view(batch_size, self.height, n_feats, inp_feats.shape[2], inp_feats.shape[3]) - - feats_U = torch.sum(inp_feats, dim=1) - feats_S = self.avg_pool(feats_U) - feats_Z = self.conv_du(feats_S) - - attention_vectors = [fc(feats_Z) for fc in self.fcs] - attention_vectors = torch.cat(attention_vectors, dim=1) - attention_vectors = attention_vectors.view(batch_size, self.height, n_feats, 1, 1) - - attention_vectors = self.softmax(attention_vectors) - feats_V = torch.sum(inp_feats * attention_vectors, dim=1) - - return feats_V - -##---------- Dense Block ---------- -class DenseLayer(nn.Module): - def __init__(self, in_channels, out_channels, I): - super(DenseLayer, self).__init__() - self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=3 // 2) - self.relu = nn.ReLU(inplace=True) - self.sk = SKFF(out_channels, height=2, reduction=8, bias=False) - - def forward(self, x): - x1 = self.relu(self.conv(x)) - # output = torch.cat([x, x1], 1) # -> RDB - output = self.sk((x, x1)) - return output - -##---------- Selective Kernel Residual Dense Block (SK-RDB) ---------- -class SK_RDB(nn.Module): - def __init__(self, in_channels, growth_rate, num_layers): - super(SK_RDB, self).__init__() - self.identity = nn.Conv2d(in_channels, growth_rate, 1, 1, 0) - self.layers = nn.Sequential( - *[DenseLayer(in_channels, in_channels, I=i) for i in range(num_layers)] - ) - self.lff = nn.Conv2d(in_channels, growth_rate, kernel_size=1) - - def forward(self, x): - res = self.identity(x) - x = self.layers(x) - x = self.lff(x) - return res + x - -##---------- testNet ---------- -class SRMNet(nn.Module): - def __init__(self, in_chn=3, wf=96, depth=4): - super(SRMNet, self).__init__() - self.depth = depth - self.down_path = nn.ModuleList() - self.bili_down = 
bili_resize(0.5) - self.conv_01 = nn.Conv2d(in_chn, wf, 3, 1, 1) - - # encoder of UNet - prev_channels = 0 - for i in range(depth): # 0,1,2,3 - downsample = True if (i + 1) < depth else False - self.down_path.append(UNetConvBlock(prev_channels + wf, (2 ** i) * wf, downsample)) - prev_channels = (2 ** i) * wf - - # decoder of UNet - self.up_path = nn.ModuleList() - self.skip_conv = nn.ModuleList() - self.conv_up = nn.ModuleList() - self.bottom_conv = nn.Conv2d(prev_channels, wf, 3, 1, 1) - self.bottom_up = bili_resize(2 ** (depth-1)) - - for i in reversed(range(depth - 1)): - self.up_path.append(UNetUpBlock(prev_channels, (2 ** i) * wf)) - self.skip_conv.append(nn.Conv2d((2 ** i) * wf, (2 ** i) * wf, 3, 1, 1)) - self.conv_up.append(nn.Sequential(*[nn.Conv2d((2 ** i) * wf, wf, 3, 1, 1), bili_resize(2 ** i)])) - prev_channels = (2 ** i) * wf - - self.final_ff = SKFF(in_channels=wf, height=depth) - self.last = conv3x3(prev_channels, in_chn, bias=True) - - def forward(self, x): - img = x - scale_img = img - - ##### shallow conv ##### - x1 = self.conv_01(img) - encs = [] - ######## UNet ######## - # Down-path (Encoder) - for i, down in enumerate(self.down_path): - if i == 0: - x1, x1_up = down(x1) - encs.append(x1_up) - elif (i + 1) < self.depth: - scale_img = self.bili_down(scale_img) - left_bar = self.conv_01(scale_img) - x1 = torch.cat([x1, left_bar], dim=1) - x1, x1_up = down(x1) - encs.append(x1_up) - else: - scale_img = self.bili_down(scale_img) - left_bar = self.conv_01(scale_img) - x1 = torch.cat([x1, left_bar], dim=1) - x1 = down(x1) - - # Up-path (Decoder) - ms_result = [self.bottom_up(self.bottom_conv(x1))] - for i, up in enumerate(self.up_path): - x1 = up(x1, self.skip_conv[i](encs[-i - 1])) - ms_result.append(self.conv_up[i](x1)) - - # Multi-scale selective feature fusion - msff_result = self.final_ff(ms_result) - - ##### Reconstruct ##### - out_1 = self.last(msff_result) + img - - return out_1 - - -if __name__ == "__main__": - from thop import profile - - input = torch.ones(1, 3, 256, 256, dtype=torch.float, requires_grad=False) - model = SRMNet(in_chn=3, wf=96, depth=4) - out = model(input) - flops, params = profile(model, inputs=(input,)) - total = sum(p.numel() for p in model.parameters()) - - # RDBlayer = SK_RDB(in_channels=64, growth_rate=64, num_layers=3) - # print(RDBlayer) - # out = RDBlayer(input) - # flops, params = profile(RDBlayer, inputs=(input,)) - - print('input shape:', input.shape) - print('output shape', out.shape) - print("-----------------------------------") - print("Total params: %.4f M" % (total / 1e6)) - print("Total params: %.4f G" % (flops / 1e9)) diff --git a/spaces/801artistry/RVC801/Applio-RVC-Fork/utils/backups_test.py b/spaces/801artistry/RVC801/Applio-RVC-Fork/utils/backups_test.py deleted file mode 100644 index f3edf15811b5035ee82f21e54e87b7e87ce413eb..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/Applio-RVC-Fork/utils/backups_test.py +++ /dev/null @@ -1,138 +0,0 @@ - -import os -import shutil -import hashlib -import time - -LOGS_FOLDER = '/content/Applio-RVC-Fork/logs' -WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights' -GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' - -def import_google_drive_backup(): - print("Importing Google Drive backup...") - GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' # change this to your Google Drive path - LOGS_FOLDER = '/content/Applio-RVC-Fork/logs' - WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights' - weights_exist = False - files_to_copy = [] - weights_to_copy = [] - 
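# Note on the nested handle_files() defined below: it assigns weights_exist = True without a
# `nonlocal weights_exist` declaration, so the assignment creates a new local inside
# handle_files and the enclosing weights_exist flag never changes. As written, the
# "No weights found in Google Drive backup." branch is taken even when .pth files were copied.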
- def handle_files(root, files, is_weight_files=False): - for filename in files: - filepath = os.path.join(root, filename) - if filename.endswith('.pth') and is_weight_files: - weights_exist = True - backup_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH)) - else: - backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH)) - backup_folderpath = os.path.dirname(backup_filepath) - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created folder: {backup_folderpath}', flush=True) - if is_weight_files: - weights_to_copy.append((filepath, backup_filepath)) - else: - files_to_copy.append((filepath, backup_filepath)) - - for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'logs')): - handle_files(root, files) - - for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'weights')): - handle_files(root, files, True) - - # Copy files in batches - total_files = len(files_to_copy) - start_time = time.time() - for i, (source, dest) in enumerate(files_to_copy, start=1): - with open(source, 'rb') as src, open(dest, 'wb') as dst: - shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size - # Report progress every 5 seconds or after every 100 files, whichever is less frequent - if time.time() - start_time > 5 or i % 100 == 0: - print(f'\rCopying file {i} of {total_files} ({i * 100 / total_files:.2f}%)', end="") - start_time = time.time() - print(f'\nImported {len(files_to_copy)} files from Google Drive backup') - - # Copy weights in batches - total_weights = len(weights_to_copy) - start_time = time.time() - for i, (source, dest) in enumerate(weights_to_copy, start=1): - with open(source, 'rb') as src, open(dest, 'wb') as dst: - shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size - # Report progress every 5 seconds or after every 100 files, whichever is less frequent - if time.time() - start_time > 5 or i % 100 == 0: - print(f'\rCopying weight file {i} of {total_weights} ({i * 100 / total_weights:.2f}%)', end="") - start_time = time.time() - if weights_exist: - print(f'\nImported {len(weights_to_copy)} weight files') - print("Copied weights from Google Drive backup to local weights folder.") - else: - print("\nNo weights found in Google Drive backup.") - print("Google Drive backup import completed.") - -def backup_files(): - print("\n Starting backup loop...") - last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt') - fully_updated = False # boolean to track if all files are up to date - try: - with open(last_backup_timestamps_path, 'r') as f: - last_backup_timestamps = dict(line.strip().split(':') for line in f) - except: - last_backup_timestamps = {} - - while True: - updated = False - files_to_copy = [] - files_to_delete = [] - - for root, dirs, files in os.walk(LOGS_FOLDER): - for filename in files: - if filename != 'last_backup_timestamps.txt': - filepath = os.path.join(root, filename) - if os.path.isfile(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - backup_folderpath = os.path.dirname(backup_filepath) - - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created backup folder: {backup_folderpath}', flush=True) - - # check if file has changed since last backup - last_backup_timestamp = last_backup_timestamps.get(filepath) - current_timestamp = os.path.getmtime(filepath) - if last_backup_timestamp is None or float(last_backup_timestamp) < 
current_timestamp: - files_to_copy.append((filepath, backup_filepath)) # add to list of files to copy - last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp - updated = True - fully_updated = False # if a file is updated, all files are not up to date - - # check if any files were deleted in Colab and delete them from the backup drive - for filepath in list(last_backup_timestamps.keys()): - if not os.path.exists(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - if os.path.exists(backup_filepath): - files_to_delete.append(backup_filepath) # add to list of files to delete - del last_backup_timestamps[filepath] - updated = True - fully_updated = False # if a file is deleted, all files are not up to date - - # Copy files in batches - if files_to_copy: - for source, dest in files_to_copy: - shutil.copy2(source, dest) - print(f'Copied or updated {len(files_to_copy)} files') - - # Delete files in batches - if files_to_delete: - for file in files_to_delete: - os.remove(file) - print(f'Deleted {len(files_to_delete)} files') - - if not updated and not fully_updated: - print("Files are up to date.") - fully_updated = True # if all files are up to date, set the boolean to True - copy_weights_folder_to_drive() - - with open(last_backup_timestamps_path, 'w') as f: - for filepath, timestamp in last_backup_timestamps.items(): - f.write(f'{filepath}:{timestamp}\n') - time.sleep(15) # wait for 15 seconds before checking again diff --git a/spaces/A-Celsius/Caption-Generator/app.py b/spaces/A-Celsius/Caption-Generator/app.py deleted file mode 100644 index b38134c7396f58dad120de68b7aa0f97052e9fd6..0000000000000000000000000000000000000000 --- a/spaces/A-Celsius/Caption-Generator/app.py +++ /dev/null @@ -1,48 +0,0 @@ -from PIL import Image -from transformers import BlipProcessor, BlipForConditionalGeneration -import torch -import gradio as gr - -model_name = "Salesforce/blip-image-captioning-base" - -caption_processor = BlipProcessor.from_pretrained(model_name) -model = BlipForConditionalGeneration.from_pretrained(model_name) - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model.to(device) - -def generate_captions(image, num_captions=5,size=(512, 512)): - image = image.resize(size) - if image.mode != 'RGB': - image = image.convert('RGB') - pixel_values = caption_processor(image, return_tensors='pt').to(device) - - caption_ids = model.generate( - **pixel_values, - max_length=30, - num_beams=5, - num_return_sequences=num_captions, - temperature=1.0 - ) - - captions = [ - caption_processor.decode(ids, skip_special_tokens=True) - for ids in caption_ids - ] - - return captions - -from gradio.components import Image, Textbox,Slider - -interface = gr.Interface( - fn=generate_captions, - inputs=[ - Image(type="pil", label="Input Image"), - Slider(minimum=1, maximum=5, step=1, label="Number of Captions") - ], - outputs=Textbox(type="text", label="Captions"), - title="Image Caption Generator", - description="AI tool that creates captions based on the image provided by the user.", -) - -interface.launch() \ No newline at end of file diff --git a/spaces/AHzizi/WaifuVoiceGen/commons.py b/spaces/AHzizi/WaifuVoiceGen/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/AHzizi/WaifuVoiceGen/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def 
script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 
0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/config.py b/spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/config.py deleted file mode 100644 index 091d790e963959c326917688ee267e6a4ec136d1..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/config.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - -SMPL_DATA_PATH = "./body_models/smpl" - -SMPL_KINTREE_PATH = os.path.join(SMPL_DATA_PATH, "kintree_table.pkl") -SMPL_MODEL_PATH = os.path.join(SMPL_DATA_PATH, "SMPL_NEUTRAL.pkl") -JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(SMPL_DATA_PATH, 'J_regressor_extra.npy') - -ROT_CONVENTION_TO_ROT_NUMBER = { - 'legacy': 23, - 'no_hands': 21, - 'full_hands': 51, - 'mitten_hands': 33, -} - -GENDERS = ['neutral', 'male', 'female'] -NUM_BETAS = 10 \ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/contperceptual_dis.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/contperceptual_dis.py deleted file mode 100644 index be47add559612aaf8c667afd554d88e23fd8fd56..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/contperceptual_dis.py +++ /dev/null @@ -1,137 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import sys - -sys.path.insert(0, '.') # nopep8 -from ldm.modules.losses_audio.vqperceptual import * -from ldm.modules.discriminator.multi_window_disc import Discriminator - -class LPAPSWithDiscriminator(nn.Module):# 相比于contperceptual.py添加了MultiWindowDiscriminator - def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, - disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, - perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, - disc_loss="hinge"): - - super().__init__() - assert disc_loss in ["hinge", "vanilla"] - self.kl_weight = kl_weight - self.pixel_weight = pixelloss_weight - self.perceptual_loss = LPAPS().eval() - self.perceptual_weight = perceptual_weight - # output log variance - self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) - - self.discriminator = 
NLayerDiscriminator(input_nc=disc_in_channels, - n_layers=disc_num_layers, - use_actnorm=use_actnorm, - ).apply(weights_init) - self.discriminator_iter_start = disc_start - if disc_loss == "hinge": - self.disc_loss = hinge_d_loss - elif disc_loss == "vanilla": - self.disc_loss = vanilla_d_loss - else: - raise ValueError(f"Unknown GAN loss '{disc_loss}'.") - print(f"LPAPSWithDiscriminator running with {disc_loss} loss.") - self.disc_factor = disc_factor - self.discriminator_weight = disc_weight - self.disc_conditional = disc_conditional - - disc_win_num = 3 - mel_disc_hidden_size = 128 - self.discriminator_multi = Discriminator(time_lengths=[32, 64, 128][:disc_win_num], - freq_length=80, hidden_size=mel_disc_hidden_size, kernel=(3, 3), - cond_size=0, norm_type="in", reduction="stack") - - def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): - if last_layer is not None: - nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] - else: - nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] - - d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) - d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() - d_weight = d_weight * self.discriminator_weight - return d_weight - - def forward(self, inputs, reconstructions, posteriors, optimizer_idx, - global_step, last_layer=None, cond=None, split="train", weights=None): - rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) - if self.perceptual_weight > 0: - p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) - rec_loss = rec_loss + self.perceptual_weight * p_loss - else: - p_loss = torch.tensor([0.0]) - - nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar - weighted_nll_loss = nll_loss - if weights is not None: - weighted_nll_loss = weights*nll_loss - weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] - nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] - kl_loss = posteriors.kl() - kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] - - # now the GAN part - if optimizer_idx == 0: - # generator update - if cond is None: - assert not self.disc_conditional - logits_fake = self.discriminator(reconstructions.contiguous()) - else: - assert self.disc_conditional - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) - - logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().squeeze(1).transpose(1, 2)) - - g_loss = -torch.mean(logits_fake) - g_loss_multi = -torch.mean(logits_fake_multi['y']) - - try: - d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) - d_weight_multi = self.calculate_adaptive_weight(nll_loss, g_loss_multi, last_layer=last_layer) - except RuntimeError: - assert not self.training - d_weight = d_weight_multi = torch.tensor(0.0) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + d_weight_multi * disc_factor * g_loss_multi - - log = {"{}/total_loss".format(split): loss.clone().detach().mean(), - "{}/logvar".format(split): self.logvar.detach(), - "{}/kl_loss".format(split): kl_loss.detach().mean(), - "{}/nll_loss".format(split): nll_loss.detach().mean(), - "{}/rec_loss".format(split): rec_loss.detach().mean(), 
- "{}/d_weight".format(split): d_weight.detach(), - "{}/disc_factor".format(split): torch.tensor(disc_factor), - "{}/g_loss".format(split): g_loss.detach().mean(), - "{}/g_loss_multi".format(split): g_loss_multi.detach().mean(), - } - return loss, log - - if optimizer_idx == 1: - # second pass for discriminator update - if cond is None: - logits_real = self.discriminator(inputs.contiguous().detach()) - logits_fake = self.discriminator(reconstructions.contiguous().detach()) - else: - logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) - - logits_real_multi = self.discriminator_multi(inputs.contiguous().detach().squeeze(1).transpose(1, 2)) - logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().detach().squeeze(1).transpose(1, 2)) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) - d_loss_multi = disc_factor * self.disc_loss(logits_real_multi['y'], logits_fake_multi['y']) - - log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), - "{}/disc_loss_multi".format(split): d_loss_multi.clone().detach().mean(), - "{}/logits_real".format(split): logits_real.detach().mean(), - "{}/logits_fake".format(split): logits_fake.detach().mean() - } - return d_loss+d_loss_multi, log - diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/factory.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/factory.py deleted file mode 100644 index 3c3b28658adb03462b9c4b5405548d4e0d1edc5e..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/factory.py +++ /dev/null @@ -1,257 +0,0 @@ -import json -import logging -import os -import pathlib -import re -from copy import deepcopy -from pathlib import Path - -import torch - -from .model import CLAP, convert_weights_to_fp16 -from .openai import load_openai_model -from .pretrained import get_pretrained_url, download_pretrained -from .transform import image_transform - -_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"] -_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs - - -def _natural_key(string_): - return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())] - - -def _rescan_model_configs(): - global _MODEL_CONFIGS - - config_ext = (".json",) - config_files = [] - for config_path in _MODEL_CONFIG_PATHS: - if config_path.is_file() and config_path.suffix in config_ext: - config_files.append(config_path) - elif config_path.is_dir(): - for ext in config_ext: - config_files.extend(config_path.glob(f"*{ext}")) - - for cf in config_files: - with open(cf, "r") as f: - model_cfg = json.load(f) - if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")): - _MODEL_CONFIGS[cf.stem] = model_cfg - - _MODEL_CONFIGS = { - k: v - for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0])) - } - - -_rescan_model_configs() # initial populate of model config registry - - -def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True): - checkpoint = torch.load(checkpoint_path, map_location=map_location) - if isinstance(checkpoint, dict) and "state_dict" in checkpoint: - state_dict = checkpoint["state_dict"] - else: - state_dict = checkpoint - if skip_params: - if 
next(iter(state_dict.items()))[0].startswith("module"): - state_dict = {k[7:]: v for k, v in state_dict.items()} - # for k in state_dict: - # if k.startswith('transformer'): - # v = state_dict.pop(k) - # state_dict['text_branch.' + k[12:]] = v - return state_dict - - -def create_model( - amodel_name: str, - tmodel_name: str, - pretrained: str = "", - precision: str = "fp32", - device: torch.device = torch.device("cpu"), - jit: bool = False, - force_quick_gelu: bool = False, - openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"), - skip_params=True, - pretrained_audio: str = "", - pretrained_text: str = "", - enable_fusion: bool = False, - fusion_type: str = 'None' - # pretrained_image: bool = False, -): - amodel_name = amodel_name.replace( - "/", "-" - ) # for callers using old naming with / in ViT names - pretrained_orig = pretrained - pretrained = pretrained.lower() - if pretrained == "openai": - if amodel_name in _MODEL_CONFIGS: - logging.info(f"Loading {amodel_name} model config.") - model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name]) - else: - logging.error( - f"Model config for {amodel_name} not found; available models {list_models()}." - ) - raise RuntimeError(f"Model config for {amodel_name} not found.") - - logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.") - # Hard Code in model name - model_cfg["text_cfg"]["model_type"] = tmodel_name - model = load_openai_model( - "ViT-B-16", - model_cfg, - device=device, - jit=jit, - cache_dir=openai_model_cache_dir, - enable_fusion=enable_fusion, - fusion_type=fusion_type - ) - # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372 - if precision == "amp" or precision == "fp32": - model = model.float() - else: - if amodel_name in _MODEL_CONFIGS: - logging.info(f"Loading {amodel_name} model config.") - model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name]) - else: - logging.error( - f"Model config for {amodel_name} not found; available models {list_models()}." - ) - raise RuntimeError(f"Model config for {amodel_name} not found.") - - if force_quick_gelu: - # override for use of QuickGELU on non-OpenAI transformer models - model_cfg["quick_gelu"] = True - - # if pretrained_image: - # if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}): - # # pretrained weight loading for timm models set via vision_cfg - # model_cfg['vision_cfg']['timm_model_pretrained'] = True - # else: - # assert False, 'pretrained image towers currently only supported for timm models' - model_cfg["text_cfg"]["model_type"] = tmodel_name - model_cfg["enable_fusion"] = enable_fusion - model_cfg["fusion_type"] = fusion_type - model = CLAP(**model_cfg) - - if pretrained: - checkpoint_path = "" - url = get_pretrained_url(amodel_name, pretrained) - if url: - checkpoint_path = download_pretrained(url, root=openai_model_cache_dir) - elif os.path.exists(pretrained_orig): - checkpoint_path = pretrained_orig - if checkpoint_path: - logging.info(f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained}).") - ckpt = load_state_dict(checkpoint_path, skip_params=True) - model.load_state_dict(ckpt) - param_names = [n for n, p in model.named_parameters()] - for n in param_names: - print(n, "\t", "Loaded" if n in ckpt else "Unloaded") - else: - logging.warning( - f"Pretrained weights ({pretrained}) not found for model {amodel_name}." - ) - raise RuntimeError( - f"Pretrained weights ({pretrained}) not found for model {amodel_name}." 
- ) - - if pretrained_audio: - if amodel_name.startswith('PANN'): - if 'Cnn14_mAP' in pretrained_audio: # official checkpoint - audio_ckpt = torch.load(pretrained_audio, map_location='cpu') - audio_ckpt = audio_ckpt['model'] - keys = list(audio_ckpt.keys()) - for key in keys: - if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key: - v = audio_ckpt.pop(key) - audio_ckpt['audio_branch.' + key] = v - elif os.path.basename(pretrained_audio).startswith('PANN'): # checkpoint trained via HTSAT codebase - audio_ckpt = torch.load(pretrained_audio, map_location='cpu') - audio_ckpt = audio_ckpt['state_dict'] - keys = list(audio_ckpt.keys()) - for key in keys: - if key.startswith('sed_model'): - v = audio_ckpt.pop(key) - audio_ckpt['audio_branch.' + key[10:]] = v - elif os.path.basename(pretrained_audio).startswith('finetuned'): # checkpoint trained via linear probe codebase - audio_ckpt = torch.load(pretrained_audio, map_location='cpu') - else: - raise ValueError('Unknown audio checkpoint') - elif amodel_name.startswith('HTSAT'): - if 'HTSAT_AudioSet_Saved' in pretrained_audio: # official checkpoint - audio_ckpt = torch.load(pretrained_audio, map_location='cpu') - audio_ckpt = audio_ckpt['state_dict'] - keys = list(audio_ckpt.keys()) - for key in keys: - if key.startswith('sed_model') and ('spectrogram_extractor' not in key - and 'logmel_extractor' not in key): - v = audio_ckpt.pop(key) - audio_ckpt['audio_branch.' + key[10:]] = v - elif os.path.basename(pretrained_audio).startswith('HTSAT'): # checkpoint trained via HTSAT codebase - audio_ckpt = torch.load(pretrained_audio, map_location='cpu') - audio_ckpt = audio_ckpt['state_dict'] - keys = list(audio_ckpt.keys()) - for key in keys: - if key.startswith('sed_model'): - v = audio_ckpt.pop(key) - audio_ckpt['audio_branch.' 
+ key[10:]] = v - elif os.path.basename(pretrained_audio).startswith('finetuned'): # checkpoint trained via linear probe codebase - audio_ckpt = torch.load(pretrained_audio, map_location='cpu') - else: - raise ValueError('Unknown audio checkpoint') - else: - raise f'this audio encoder pretrained checkpoint is not support' - - model.load_state_dict(audio_ckpt, strict=False) - logging.info(f"Loading pretrained {amodel_name} weights ({pretrained_audio}).") - param_names = [n for n, p in model.named_parameters()] - for n in param_names: - print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded") - - model.to(device=device) - if precision == "fp16": - assert device.type != "cpu" - convert_weights_to_fp16(model) - - if jit: - model = torch.jit.script(model) - - return model, model_cfg - - -def create_model_and_transforms( - model_name: str, - pretrained: str = "", - precision: str = "fp32", - device: torch.device = torch.device("cpu"), - jit: bool = False, - force_quick_gelu: bool = False, - # pretrained_image: bool = False, -): - model = create_model( - model_name, - pretrained, - precision, - device, - jit, - force_quick_gelu=force_quick_gelu, - # pretrained_image=pretrained_image - ) - preprocess_train = image_transform(model.visual.image_size, is_train=True) - preprocess_val = image_transform(model.visual.image_size, is_train=False) - return model, preprocess_train, preprocess_val - - -def list_models(): - """enumerate available model architectures based on config files""" - return list(_MODEL_CONFIGS.keys()) - - -def add_model_config(path): - """add model config path or file and update registry""" - if not isinstance(path, Path): - path = Path(path) - _MODEL_CONFIG_PATHS.append(path) - _rescan_model_configs() diff --git a/spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/vit.py b/spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/vit.py deleted file mode 100644 index 3c2d04489f8bde88fd51acb34c62b570ab221f35..0000000000000000000000000000000000000000 --- a/spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/vit.py +++ /dev/null @@ -1,395 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause - - Based on timm code base - https://github.com/rwightman/pytorch-image-models/tree/master/timm -""" - -import math -import torch -import torch.nn as nn -import torch.nn.functional as F -from functools import partial - -from timm.models.vision_transformer import _cfg, PatchEmbed -from timm.models.registry import register_model -from timm.models.layers import trunc_normal_, DropPath -from timm.models.helpers import named_apply, adapt_input_conv - - -class Mlp(nn.Module): - """MLP as used in Vision Transformer, MLP-Mixer and related networks""" - def __init__( - self, - in_features, - hidden_features=None, - out_features=None, - act_layer=nn.GELU, - drop=0.0, - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class Attention(nn.Module): - def __init__( - self, - dim, - num_heads=8, - qkv_bias=False, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - ): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim**-0.5 - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - self.attn_gradients = None - self.attention_map = None - - def save_attn_gradients(self, attn_gradients): - self.attn_gradients = attn_gradients - - def get_attn_gradients(self): - return self.attn_gradients - - def save_attention_map(self, attention_map): - self.attention_map = attention_map - - def get_attention_map(self): - return self.attention_map - - def forward(self, x, register_hook=False): - B, N, C = x.shape - qkv = (self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)) - q, k, v = ( - qkv[0], - qkv[1], - qkv[2], - ) # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - if register_hook: - self.save_attention_map(attn) - attn.register_hook(self.save_attn_gradients) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class Block(nn.Module): - def __init__( - self, - dim, - num_heads, - mlp_ratio=4.0, - qkv_bias=False, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - act_layer=nn.GELU, - norm_layer=nn.LayerNorm, - use_grad_checkpointing=False, - ): - super().__init__() - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop, - ) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, - hidden_features=mlp_hidden_dim, - act_layer=act_layer, - drop=drop, - ) - - # if use_grad_checkpointing: 
- # self.attn = checkpoint_wrapper(self.attn) - # self.mlp = checkpoint_wrapper(self.mlp) - - def forward(self, x, register_hook=False): - x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class VisionTransformer(nn.Module): - """Vision Transformer - A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - - https://arxiv.org/abs/2010.11929 - """ - def __init__( - self, - img_size=224, - patch_size=16, - in_chans=3, - num_classes=1000, - embed_dim=768, - depth=12, - num_heads=12, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - representation_size=None, - drop_rate=0.0, - attn_drop_rate=0.0, - drop_path_rate=0.0, - norm_layer=None, - use_grad_checkpointing=False, - ckpt_layer=0, - ): - """ - Args: - img_size (int, tuple): input image size - patch_size (int, tuple): patch size - in_chans (int): number of input channels - num_classes (int): number of classes for classification head - embed_dim (int): embedding dimension - depth (int): depth of transformer - num_heads (int): number of attention heads - mlp_ratio (int): ratio of mlp hidden dim to embedding dim - qkv_bias (bool): enable bias for qkv if True - qk_scale (float): override default qk scale of head_dim ** -0.5 if set - representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set - drop_rate (float): dropout rate - attn_drop_rate (float): attention dropout rate - drop_path_rate (float): stochastic depth rate - norm_layer: (nn.Module): normalization layer - """ - super().__init__() - self.num_features = (self.embed_dim) = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.patch_embed = PatchEmbed( - img_size=img_size, - patch_size=patch_size, - in_chans=in_chans, - embed_dim=embed_dim, - ) - - num_patches = self.patch_embed.num_patches - - self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) - self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) - self.pos_drop = nn.Dropout(p=drop_rate) - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule - self.blocks = nn.ModuleList([ - Block( - dim=embed_dim, - num_heads=num_heads, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[i], - norm_layer=norm_layer, - use_grad_checkpointing=(use_grad_checkpointing and i >= depth - ckpt_layer), - ) for i in range(depth) - ]) - self.norm = norm_layer(embed_dim) - - trunc_normal_(self.pos_embed, std=0.02) - trunc_normal_(self.cls_token, std=0.02) - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {"pos_embed", "cls_token"} - - def forward(self, x, register_blk=-1): - B = x.shape[0] - x = self.patch_embed(x) - - cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks - x = torch.cat((cls_tokens, x), dim=1) - - x = x + self.pos_embed[:, :x.size(1), :] - x = self.pos_drop(x) - - for i, blk in enumerate(self.blocks): - x = blk(x, register_blk == i) - x = self.norm(x) - - return x - - 
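# A minimal usage sketch for the VisionTransformer above (illustrative only, assuming timm's
# PatchEmbed and a 224x224 RGB batch; the forward pass returns normalized tokens, no head):
#
#   vit = VisionTransformer(img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12)
#   x = torch.randn(2, 3, 224, 224)
#   tokens = vit(x)   # shape (2, 197, 768): one CLS token plus 14*14 patch tokens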
@torch.jit.ignore() - def load_pretrained(self, checkpoint_path, prefix=""): - _load_weights(self, checkpoint_path, prefix) - - -@torch.no_grad() -def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ""): - """Load weights from .npz checkpoints for official Google Brain Flax implementation""" - import numpy as np - - def _n2p(w, t=True): - if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: - w = w.flatten() - if t: - if w.ndim == 4: - w = w.transpose([3, 2, 0, 1]) - elif w.ndim == 3: - w = w.transpose([2, 0, 1]) - elif w.ndim == 2: - w = w.transpose([1, 0]) - return torch.from_numpy(w) - - w = np.load(checkpoint_path) - if not prefix and "opt/target/embedding/kernel" in w: - prefix = "opt/target/" - - if hasattr(model.patch_embed, "backbone"): - # hybrid - backbone = model.patch_embed.backbone - stem_only = not hasattr(backbone, "stem") - stem = backbone if stem_only else backbone.stem - stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f"{prefix}conv_root/kernel"]))) - stem.norm.weight.copy_(_n2p(w[f"{prefix}gn_root/scale"])) - stem.norm.bias.copy_(_n2p(w[f"{prefix}gn_root/bias"])) - if not stem_only: - for i, stage in enumerate(backbone.stages): - for j, block in enumerate(stage.blocks): - bp = f"{prefix}block{i + 1}/unit{j + 1}/" - for r in range(3): - getattr(block, f"conv{r + 1}").weight.copy_(_n2p(w[f"{bp}conv{r + 1}/kernel"])) - getattr(block, f"norm{r + 1}").weight.copy_(_n2p(w[f"{bp}gn{r + 1}/scale"])) - getattr(block, f"norm{r + 1}").bias.copy_(_n2p(w[f"{bp}gn{r + 1}/bias"])) - if block.downsample is not None: - block.downsample.conv.weight.copy_(_n2p(w[f"{bp}conv_proj/kernel"])) - block.downsample.norm.weight.copy_(_n2p(w[f"{bp}gn_proj/scale"])) - block.downsample.norm.bias.copy_(_n2p(w[f"{bp}gn_proj/bias"])) - embed_conv_w = _n2p(w[f"{prefix}embedding/kernel"]) - else: - embed_conv_w = adapt_input_conv(model.patch_embed.proj.weight.shape[1], _n2p(w[f"{prefix}embedding/kernel"])) - model.patch_embed.proj.weight.copy_(embed_conv_w) - model.patch_embed.proj.bias.copy_(_n2p(w[f"{prefix}embedding/bias"])) - model.cls_token.copy_(_n2p(w[f"{prefix}cls"], t=False)) - pos_embed_w = _n2p(w[f"{prefix}Transformer/posembed_input/pos_embedding"], t=False) - if pos_embed_w.shape != model.pos_embed.shape: - pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights - pos_embed_w, - model.pos_embed, - getattr(model, "num_tokens", 1), - model.patch_embed.grid_size, - ) - model.pos_embed.copy_(pos_embed_w) - model.norm.weight.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/scale"])) - model.norm.bias.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/bias"])) - # if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: - # model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) - # model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) - # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: - # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) - # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) - for i, block in enumerate(model.blocks.children()): - block_prefix = f"{prefix}Transformer/encoderblock_{i}/" - mha_prefix = block_prefix + "MultiHeadDotProductAttention_1/" - block.norm1.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/scale"])) - block.norm1.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/bias"])) - block.attn.qkv.weight.copy_( - 
torch.cat([_n2p(w[f"{mha_prefix}{n}/kernel"], t=False).flatten(1).T for n in ("query", "key", "value")])) - block.attn.qkv.bias.copy_( - torch.cat([_n2p(w[f"{mha_prefix}{n}/bias"], t=False).reshape(-1) for n in ("query", "key", "value")])) - block.attn.proj.weight.copy_(_n2p(w[f"{mha_prefix}out/kernel"]).flatten(1)) - block.attn.proj.bias.copy_(_n2p(w[f"{mha_prefix}out/bias"])) - for r in range(2): - getattr(block.mlp, f"fc{r + 1}").weight.copy_(_n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/kernel"])) - getattr(block.mlp, f"fc{r + 1}").bias.copy_(_n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/bias"])) - block.norm2.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/scale"])) - block.norm2.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/bias"])) - - -def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): - # Rescale the grid of position embeddings when loading from state_dict. Adapted from - # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 - print("Resized position embedding: %s to %s", posemb.shape, posemb_new.shape) - ntok_new = posemb_new.shape[1] - if num_tokens: - posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] - ntok_new -= num_tokens - else: - posemb_tok, posemb_grid = posemb[:, :0], posemb[0] - gs_old = int(math.sqrt(len(posemb_grid))) - if not len(gs_new): # backwards compatibility - gs_new = [int(math.sqrt(ntok_new))] * 2 - assert len(gs_new) >= 2 - print("Position embedding grid-size from %s to %s", [gs_old, gs_old], gs_new) - posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) - posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode="bicubic", align_corners=False) - posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) - posemb = torch.cat([posemb_tok, posemb_grid], dim=1) - return - - -def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): - # interpolate position embedding - embedding_size = pos_embed_checkpoint.shape[-1] - num_patches = visual_encoder.patch_embed.num_patches - num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches - # height (== width) for the checkpoint position embedding - orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens)**0.5) - # height (== width) for the new position embedding - new_size = int(num_patches**0.5) - - if orig_size != new_size: - # class_token and dist_token are kept unchanged - extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] - # only the position tokens are interpolated - pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] - pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) - pos_tokens = torch.nn.functional.interpolate(pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False) - pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) - new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) - print("reshape position embedding from %d to %d" % (orig_size**2, new_size**2)) - - return new_pos_embed - else: - return pos_embed_checkpoint diff --git a/spaces/Abhaykoul/Merriam-webster_clone/app.py b/spaces/Abhaykoul/Merriam-webster_clone/app.py deleted file mode 100644 index cf43e1de700e799357e3c8ef1a7c5e1e0c5b6def..0000000000000000000000000000000000000000 --- a/spaces/Abhaykoul/Merriam-webster_clone/app.py +++ /dev/null @@ -1,39 +0,0 @@ -import streamlit as st -import requests -from bs4 import BeautifulSoup - -st.set_page_config(page_title="Word Definition App") 
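# Note on fetch_definition() below: it scrapes merriam-webster.com directly and depends on the
# current page markup (the first <span class="dtText"> element), then keeps only the first
# three sentences. If that class name or page layout changes, the lookup silently returns
# "Definition not found."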
- -def fetch_definition(word): - base_url = f"https://www.merriam-webster.com/dictionary/{word}" - - response = requests.get(base_url) - - if response.status_code == 200: - soup = BeautifulSoup(response.text, 'html.parser') - definition_span = soup.find("span", class_="dtText") - - if definition_span: - full_definition = definition_span.get_text() - sentences = full_definition.split('. ') - limited_definition = '. '.join(sentences[:3]) - return limited_definition - else: - return "Definition not found." - else: - return "Word not found or unable to retrieve data." - -def main(): - st.title("Word Definition App") - word = st.text_input("Enter a word:") - - if word.lower() == 'quit': - st.warning("You entered 'quit'. The app will not quit as this is a web application.") - else: - if st.button("Get Definition"): - definition = fetch_definition(word) - st.write("Definition:") - st.write(definition) - -if __name__ == '__main__': - main() diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDemo.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDemo.py deleted file mode 100644 index 95cb9ecf2e27f66e10e9b232ae03fe2f3a195d91..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDemo.py +++ /dev/null @@ -1,61 +0,0 @@ -from __future__ import annotations - -import time, json, re -from aiohttp import ClientSession -from typing import AsyncGenerator - -from .base_provider import AsyncGeneratorProvider -from .helper import format_prompt - -class ChatgptDemo(AsyncGeneratorProvider): - url = "https://chat.chatgptdemo.net" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - headers = { - "authority": "chat.chatgptdemo.net", - "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US", - "origin": "https://chat.chatgptdemo.net", - "referer": "https://chat.chatgptdemo.net/", - "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36" - } - async with ClientSession(headers=headers) as session: - async with session.get(f"{cls.url}/", proxy=proxy) as response: - response.raise_for_status() - response = await response.text() - result = re.search(r'',trigger:"hover 
focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent",customClass:"",sanitize:!0,sanitizeFn:null,whiteList:{"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},popperConfig:null},We={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string|function)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)",customClass:"(string|function)",sanitize:"boolean",sanitizeFn:"(null|function)",whiteList:"object",popperConfig:"(null|object)"},Ue={HIDE:"hide.bs.tooltip",HIDDEN:"hidden.bs.tooltip",SHOW:"show.bs.tooltip",SHOWN:"shown.bs.tooltip",INSERTED:"inserted.bs.tooltip",CLICK:"click.bs.tooltip",FOCUSIN:"focusin.bs.tooltip",FOCUSOUT:"focusout.bs.tooltip",MOUSEENTER:"mouseenter.bs.tooltip",MOUSELEAVE:"mouseleave.bs.tooltip"},Ve=function(){function t(t,e){if("undefined"==typeof Yt)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var e=t.prototype;return e.enable=function(){this._isEnabled=!0},e.disable=function(){this._isEnabled=!1},e.toggleEnabled=function(){this._isEnabled=!this._isEnabled},e.toggle=function(t){if(this._isEnabled)if(t){var e=this.constructor.DATA_KEY,n=i.default(t.currentTarget).data(e);n||(n=new this.constructor(t.currentTarget,this._getDelegateConfig()),i.default(t.currentTarget).data(e,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(i.default(this.getTipElement()).hasClass(Fe))return void this._leave(null,this);this._enter(null,this)}},e.dispose=function(){clearTimeout(this._timeout),i.default.removeData(this.element,this.constructor.DATA_KEY),i.default(this.element).off(this.constructor.EVENT_KEY),i.default(this.element).closest(".modal").off("hide.bs.modal",this._hideModalHandler),this.tip&&i.default(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,this._activeTrigger=null,this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},e.show=function(){var t=this;if("none"===i.default(this.element).css("display"))throw new Error("Please use show on visible elements");var e=i.default.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){i.default(this.element).trigger(e);var n=u.findShadowRoot(this.element),o=i.default.contains(null!==n?n:this.element.ownerDocument.documentElement,this.element);if(e.isDefaultPrevented()||!o)return;var r=this.getTipElement(),a=u.getUID(this.constructor.NAME);r.setAttribute("id",a),this.element.setAttribute("aria-describedby",a),this.setContent(),this.config.animation&&i.default(r).addClass(Pe);var s="function"==typeof this.config.placement?this.config.placement.call(this,r,this.element):this.config.placement,l=this._getAttachment(s);this.addAttachmentClass(l);var 
f=this._getContainer();i.default(r).data(this.constructor.DATA_KEY,this),i.default.contains(this.element.ownerDocument.documentElement,this.tip)||i.default(r).appendTo(f),i.default(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new Yt(this.element,r,this._getPopperConfig(l)),i.default(r).addClass(Fe),i.default(r).addClass(this.config.customClass),"ontouchstart"in document.documentElement&&i.default(document.body).children().on("mouseover",null,i.default.noop);var d=function(){t.config.animation&&t._fixTransition();var e=t._hoverState;t._hoverState=null,i.default(t.element).trigger(t.constructor.Event.SHOWN),e===Be&&t._leave(null,t)};if(i.default(this.tip).hasClass(Pe)){var c=u.getTransitionDurationFromElement(this.tip);i.default(this.tip).one(u.TRANSITION_END,d).emulateTransitionEnd(c)}else d()}},e.hide=function(t){var e=this,n=this.getTipElement(),o=i.default.Event(this.constructor.Event.HIDE),r=function(){e._hoverState!==Re&&n.parentNode&&n.parentNode.removeChild(n),e._cleanTipClass(),e.element.removeAttribute("aria-describedby"),i.default(e.element).trigger(e.constructor.Event.HIDDEN),null!==e._popper&&e._popper.destroy(),t&&t()};if(i.default(this.element).trigger(o),!o.isDefaultPrevented()){if(i.default(n).removeClass(Fe),"ontouchstart"in document.documentElement&&i.default(document.body).children().off("mouseover",null,i.default.noop),this._activeTrigger.click=!1,this._activeTrigger.focus=!1,this._activeTrigger.hover=!1,i.default(this.tip).hasClass(Pe)){var a=u.getTransitionDurationFromElement(n);i.default(n).one(u.TRANSITION_END,r).emulateTransitionEnd(a)}else r();this._hoverState=""}},e.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},e.isWithContent=function(){return Boolean(this.getTitle())},e.addAttachmentClass=function(t){i.default(this.getTipElement()).addClass("bs-tooltip-"+t)},e.getTipElement=function(){return this.tip=this.tip||i.default(this.config.template)[0],this.tip},e.setContent=function(){var t=this.getTipElement();this.setElementContent(i.default(t.querySelectorAll(".tooltip-inner")),this.getTitle()),i.default(t).removeClass("fade show")},e.setElementContent=function(t,e){"object"!=typeof e||!e.nodeType&&!e.jquery?this.config.html?(this.config.sanitize&&(e=ke(e,this.config.whiteList,this.config.sanitizeFn)),t.html(e)):t.text(e):this.config.html?i.default(e).parent().is(t)||t.empty().append(e):t.text(i.default(e).text())},e.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},e._getPopperConfig=function(t){var e=this;return a({},{placement:t,modifiers:{offset:this._getOffset(),flip:{behavior:this.config.fallbackPlacement},arrow:{element:".arrow"},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){return e._handlePopperPlacementChange(t)}},this.config.popperConfig)},e._getOffset=function(){var t=this,e={};return"function"==typeof this.config.offset?e.fn=function(e){return e.offsets=a({},e.offsets,t.config.offset(e.offsets,t.element)),e}:e.offset=this.config.offset,e},e._getContainer=function(){return!1===this.config.container?document.body:u.isElement(this.config.container)?i.default(this.config.container):i.default(document).find(this.config.container)},e._getAttachment=function(t){return qe[t.toUpperCase()]},e._setListeners=function(){var t=this;this.config.trigger.split(" 
").forEach((function(e){if("click"===e)i.default(t.element).on(t.constructor.Event.CLICK,t.config.selector,(function(e){return t.toggle(e)}));else if("manual"!==e){var n=e===He?t.constructor.Event.MOUSEENTER:t.constructor.Event.FOCUSIN,o=e===He?t.constructor.Event.MOUSELEAVE:t.constructor.Event.FOCUSOUT;i.default(t.element).on(n,t.config.selector,(function(e){return t._enter(e)})).on(o,t.config.selector,(function(e){return t._leave(e)}))}})),this._hideModalHandler=function(){t.element&&t.hide()},i.default(this.element).closest(".modal").on("hide.bs.modal",this._hideModalHandler),this.config.selector?this.config=a({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},e._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},e._enter=function(t,e){var n=this.constructor.DATA_KEY;(e=e||i.default(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),i.default(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusin"===t.type?Me:He]=!0),i.default(e.getTipElement()).hasClass(Fe)||e._hoverState===Re?e._hoverState=Re:(clearTimeout(e._timeout),e._hoverState=Re,e.config.delay&&e.config.delay.show?e._timeout=setTimeout((function(){e._hoverState===Re&&e.show()}),e.config.delay.show):e.show())},e._leave=function(t,e){var n=this.constructor.DATA_KEY;(e=e||i.default(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),i.default(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusout"===t.type?Me:He]=!1),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState=Be,e.config.delay&&e.config.delay.hide?e._timeout=setTimeout((function(){e._hoverState===Be&&e.hide()}),e.config.delay.hide):e.hide())},e._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},e._getConfig=function(t){var e=i.default(this.element).data();return Object.keys(e).forEach((function(t){-1!==Le.indexOf(t)&&delete e[t]})),"number"==typeof(t=a({},this.constructor.Default,e,"object"==typeof t&&t?t:{})).delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),u.typeCheckConfig(Ie,t,this.constructor.DefaultType),t.sanitize&&(t.template=ke(t.template,t.whiteList,t.sanitizeFn)),t},e._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},e._cleanTipClass=function(){var t=i.default(this.getTipElement()),e=t.attr("class").match(je);null!==e&&e.length&&t.removeClass(e.join(""))},e._handlePopperPlacementChange=function(t){this.tip=t.instance.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},e._fixTransition=function(){var t=this.getTipElement(),e=this.config.animation;null===t.getAttribute("x-placement")&&(i.default(t).removeClass(Pe),this.config.animation=!1,this.hide(),this.show(),this.config.animation=e)},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data(Oe),r="object"==typeof e&&e;if((o||!/dispose|hide/.test(e))&&(o||(o=new t(this,r),n.data(Oe,o)),"string"==typeof e)){if("undefined"==typeof o[e])throw new TypeError('No method named 
"'+e+'"');o[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return Qe}},{key:"NAME",get:function(){return Ie}},{key:"DATA_KEY",get:function(){return Oe}},{key:"Event",get:function(){return Ue}},{key:"EVENT_KEY",get:function(){return".bs.tooltip"}},{key:"DefaultType",get:function(){return We}}]),t}();i.default.fn.tooltip=Ve._jQueryInterface,i.default.fn.tooltip.Constructor=Ve,i.default.fn.tooltip.noConflict=function(){return i.default.fn.tooltip=xe,Ve._jQueryInterface};var Ye="bs.popover",ze=i.default.fn.popover,Ke=new RegExp("(^|\\s)bs-popover\\S+","g"),Xe=a({},Ve.Default,{placement:"right",trigger:"click",content:"",template:''}),Ge=a({},Ve.DefaultType,{content:"(string|element|function)"}),$e={HIDE:"hide.bs.popover",HIDDEN:"hidden.bs.popover",SHOW:"show.bs.popover",SHOWN:"shown.bs.popover",INSERTED:"inserted.bs.popover",CLICK:"click.bs.popover",FOCUSIN:"focusin.bs.popover",FOCUSOUT:"focusout.bs.popover",MOUSEENTER:"mouseenter.bs.popover",MOUSELEAVE:"mouseleave.bs.popover"},Je=function(t){var e,n;function o(){return t.apply(this,arguments)||this}n=t,(e=o).prototype=Object.create(n.prototype),e.prototype.constructor=e,s(e,n);var a=o.prototype;return a.isWithContent=function(){return this.getTitle()||this._getContent()},a.addAttachmentClass=function(t){i.default(this.getTipElement()).addClass("bs-popover-"+t)},a.getTipElement=function(){return this.tip=this.tip||i.default(this.config.template)[0],this.tip},a.setContent=function(){var t=i.default(this.getTipElement());this.setElementContent(t.find(".popover-header"),this.getTitle());var e=this._getContent();"function"==typeof e&&(e=e.call(this.element)),this.setElementContent(t.find(".popover-body"),e),t.removeClass("fade show")},a._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},a._cleanTipClass=function(){var t=i.default(this.getTipElement()),e=t.attr("class").match(Ke);null!==e&&e.length>0&&t.removeClass(e.join(""))},o._jQueryInterface=function(t){return this.each((function(){var e=i.default(this).data(Ye),n="object"==typeof t?t:null;if((e||!/dispose|hide/.test(t))&&(e||(e=new o(this,n),i.default(this).data(Ye,e)),"string"==typeof t)){if("undefined"==typeof e[t])throw new TypeError('No method named "'+t+'"');e[t]()}}))},r(o,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return Xe}},{key:"NAME",get:function(){return"popover"}},{key:"DATA_KEY",get:function(){return Ye}},{key:"Event",get:function(){return $e}},{key:"EVENT_KEY",get:function(){return".bs.popover"}},{key:"DefaultType",get:function(){return Ge}}]),o}(Ve);i.default.fn.popover=Je._jQueryInterface,i.default.fn.popover.Constructor=Je,i.default.fn.popover.noConflict=function(){return i.default.fn.popover=ze,Je._jQueryInterface};var Ze="scrollspy",tn="bs.scrollspy",en=i.default.fn[Ze],nn="active",on="position",rn=".nav, .list-group",an={offset:10,method:"auto",target:""},sn={offset:"number",method:"string",target:"(string|element)"},ln=function(){function t(t,e){var n=this;this._element=t,this._scrollElement="BODY"===t.tagName?window:t,this._config=this._getConfig(e),this._selector=this._config.target+" .nav-link,"+this._config.target+" .list-group-item,"+this._config.target+" .dropdown-item",this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,i.default(this._scrollElement).on("scroll.bs.scrollspy",(function(t){return n._process(t)})),this.refresh(),this._process()}var e=t.prototype;return e.refresh=function(){var 
t=this,e=this._scrollElement===this._scrollElement.window?"offset":on,n="auto"===this._config.method?e:this._config.method,o=n===on?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),[].slice.call(document.querySelectorAll(this._selector)).map((function(t){var e,r=u.getSelectorFromElement(t);if(r&&(e=document.querySelector(r)),e){var a=e.getBoundingClientRect();if(a.width||a.height)return[i.default(e)[n]().top+o,r]}return null})).filter(Boolean).sort((function(t,e){return t[0]-e[0]})).forEach((function(e){t._offsets.push(e[0]),t._targets.push(e[1])}))},e.dispose=function(){i.default.removeData(this._element,tn),i.default(this._scrollElement).off(".bs.scrollspy"),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},e._getConfig=function(t){if("string"!=typeof(t=a({},an,"object"==typeof t&&t?t:{})).target&&u.isElement(t.target)){var e=i.default(t.target).attr("id");e||(e=u.getUID(Ze),i.default(t.target).attr("id",e)),t.target="#"+e}return u.typeCheckConfig(Ze,t,sn),t},e._getScrollTop=function(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},e._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},e._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},e._process=function(){var t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),n=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),t>=n){var i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&t0)return this._activeTarget=null,void this._clear();for(var o=this._offsets.length;o--;)this._activeTarget!==this._targets[o]&&t>=this._offsets[o]&&("undefined"==typeof this._offsets[o+1]||t li > .active",gn=function(){function t(t){this._element=t}var e=t.prototype;return e.show=function(){var t=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&i.default(this._element).hasClass(dn)||i.default(this._element).hasClass("disabled")||this._element.hasAttribute("disabled"))){var e,n,o=i.default(this._element).closest(".nav, .list-group")[0],r=u.getSelectorFromElement(this._element);if(o){var a="UL"===o.nodeName||"OL"===o.nodeName?mn:pn;n=(n=i.default.makeArray(i.default(o).find(a)))[n.length-1]}var s=i.default.Event("hide.bs.tab",{relatedTarget:this._element}),l=i.default.Event("show.bs.tab",{relatedTarget:n});if(n&&i.default(n).trigger(s),i.default(this._element).trigger(l),!l.isDefaultPrevented()&&!s.isDefaultPrevented()){r&&(e=document.querySelector(r)),this._activate(this._element,o);var f=function(){var e=i.default.Event("hidden.bs.tab",{relatedTarget:t._element}),o=i.default.Event("shown.bs.tab",{relatedTarget:n});i.default(n).trigger(e),i.default(t._element).trigger(o)};e?this._activate(e,e.parentNode,f):f()}}},e.dispose=function(){i.default.removeData(this._element,un),this._element=null},e._activate=function(t,e,n){var o=this,r=(!e||"UL"!==e.nodeName&&"OL"!==e.nodeName?i.default(e).children(pn):i.default(e).find(mn))[0],a=n&&r&&i.default(r).hasClass(cn),s=function(){return o._transitionComplete(t,r,n)};if(r&&a){var 
l=u.getTransitionDurationFromElement(r);i.default(r).removeClass(hn).one(u.TRANSITION_END,s).emulateTransitionEnd(l)}else s()},e._transitionComplete=function(t,e,n){if(e){i.default(e).removeClass(dn);var o=i.default(e.parentNode).find("> .dropdown-menu .active")[0];o&&i.default(o).removeClass(dn),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!1)}i.default(t).addClass(dn),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!0),u.reflow(t),t.classList.contains(cn)&&t.classList.add(hn);var r=t.parentNode;if(r&&"LI"===r.nodeName&&(r=r.parentNode),r&&i.default(r).hasClass("dropdown-menu")){var a=i.default(t).closest(".dropdown")[0];if(a){var s=[].slice.call(a.querySelectorAll(".dropdown-toggle"));i.default(s).addClass(dn)}t.setAttribute("aria-expanded",!0)}n&&n()},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data(un);if(o||(o=new t(this),n.data(un,o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}}]),t}();i.default(document).on("click.bs.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"], [data-toggle="list"]',(function(t){t.preventDefault(),gn._jQueryInterface.call(i.default(this),"show")})),i.default.fn.tab=gn._jQueryInterface,i.default.fn.tab.Constructor=gn,i.default.fn.tab.noConflict=function(){return i.default.fn.tab=fn,gn._jQueryInterface};var _n="bs.toast",vn=i.default.fn.toast,bn="hide",yn="show",En="showing",wn="click.dismiss.bs.toast",Tn={animation:!0,autohide:!0,delay:500},Cn={animation:"boolean",autohide:"boolean",delay:"number"},Sn=function(){function t(t,e){this._element=t,this._config=this._getConfig(e),this._timeout=null,this._setListeners()}var e=t.prototype;return e.show=function(){var t=this,e=i.default.Event("show.bs.toast");if(i.default(this._element).trigger(e),!e.isDefaultPrevented()){this._clearTimeout(),this._config.animation&&this._element.classList.add("fade");var n=function(){t._element.classList.remove(En),t._element.classList.add(yn),i.default(t._element).trigger("shown.bs.toast"),t._config.autohide&&(t._timeout=setTimeout((function(){t.hide()}),t._config.delay))};if(this._element.classList.remove(bn),u.reflow(this._element),this._element.classList.add(En),this._config.animation){var o=u.getTransitionDurationFromElement(this._element);i.default(this._element).one(u.TRANSITION_END,n).emulateTransitionEnd(o)}else n()}},e.hide=function(){if(this._element.classList.contains(yn)){var t=i.default.Event("hide.bs.toast");i.default(this._element).trigger(t),t.isDefaultPrevented()||this._close()}},e.dispose=function(){this._clearTimeout(),this._element.classList.contains(yn)&&this._element.classList.remove(yn),i.default(this._element).off(wn),i.default.removeData(this._element,_n),this._element=null,this._config=null},e._getConfig=function(t){return t=a({},Tn,i.default(this._element).data(),"object"==typeof t&&t?t:{}),u.typeCheckConfig("toast",t,this.constructor.DefaultType),t},e._setListeners=function(){var t=this;i.default(this._element).on(wn,'[data-dismiss="toast"]',(function(){return t.hide()}))},e._close=function(){var t=this,e=function(){t._element.classList.add(bn),i.default(t._element).trigger("hidden.bs.toast")};if(this._element.classList.remove(yn),this._config.animation){var n=u.getTransitionDurationFromElement(this._element);i.default(this._element).one(u.TRANSITION_END,e).emulateTransitionEnd(n)}else 
e()},e._clearTimeout=function(){clearTimeout(this._timeout),this._timeout=null},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data(_n);if(o||(o=new t(this,"object"==typeof e&&e),n.data(_n,o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e](this)}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"DefaultType",get:function(){return Cn}},{key:"Default",get:function(){return Tn}}]),t}();i.default.fn.toast=Sn._jQueryInterface,i.default.fn.toast.Constructor=Sn,i.default.fn.toast.noConflict=function(){return i.default.fn.toast=vn,Sn._jQueryInterface},t.Alert=c,t.Button=b,t.Carousel=O,t.Collapse=W,t.Dropdown=le,t.Modal=Se,t.Popover=Je,t.Scrollspy=ln,t.Tab=gn,t.Toast=Sn,t.Tooltip=Ve,t.Util=u,Object.defineProperty(t,"__esModule",{value:!0})})); -//# sourceMappingURL=bootstrap.bundle.min.js.map \ No newline at end of file diff --git a/spaces/hf4all/web-ui/_next/static/chunks/539bbe41.64229b30659f90aa.js b/spaces/hf4all/web-ui/_next/static/chunks/539bbe41.64229b30659f90aa.js deleted file mode 100644 index 5874c95b74caeb368f908d327f9975426dbbaae5..0000000000000000000000000000000000000000 --- a/spaces/hf4all/web-ui/_next/static/chunks/539bbe41.64229b30659f90aa.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[173],{1033:function(t,a,c){c.d(a,{$NG:function(){return h},y9X:function(){return r}});var n=c(83270);function r(t){return(0,n.w_)({tag:"svg",attr:{viewBox:"0 0 1024 1024"},child:[{tag:"path",attr:{d:"M811.4 418.7C765.6 297.9 648.9 212 512.2 212S258.8 297.8 213 418.6C127.3 441.1 64 519.1 64 612c0 110.5 89.5 200 199.9 200h496.2C870.5 812 960 722.5 960 612c0-92.7-63.1-170.7-148.6-193.3z"}}]})(t)}function h(t){return(0,n.w_)({tag:"svg",attr:{viewBox:"0 0 1024 1024"},child:[{tag:"path",attr:{d:"M854.6 288.7c6 6 9.4 14.1 9.4 22.6V928c0 17.7-14.3 32-32 32H192c-17.7 0-32-14.3-32-32V96c0-17.7 14.3-32 32-32h424.7c8.5 0 16.7 3.4 22.7 9.4l215.2 215.3zM790.2 326L602 137.8V326h188.2zM426.13 600.93l59.11 132.97a16 16 0 0 0 14.62 9.5h24.06a16 16 0 0 0 14.63-9.51l59.1-133.35V758a16 16 0 0 0 16.01 16H641a16 16 0 0 0 16-16V486a16 16 0 0 0-16-16h-34.75a16 16 0 0 0-14.67 9.62L512.1 662.2l-79.48-182.59a16 16 0 0 0-14.67-9.61H383a16 16 0 0 0-16 16v272a16 16 0 0 0 16 16h27.13a16 16 0 0 0 16-16V600.93z"}}]})(t)}}}]); \ No newline at end of file diff --git a/spaces/hjianganthony/fetch_ner/src/utils.py b/spaces/hjianganthony/fetch_ner/src/utils.py deleted file mode 100644 index 10acc1de87e55a5d01c744e5ba64b1791451a419..0000000000000000000000000000000000000000 --- a/spaces/hjianganthony/fetch_ner/src/utils.py +++ /dev/null @@ -1,319 +0,0 @@ -# helper functions -from typing import List, Dict, Tuple -import re -from sklearn.feature_extraction.text import CountVectorizer -from sklearn.metrics.pairwise import cosine_similarity -import pandas as pd -import numpy as np -import pickle, json -# from IPython.display import clear_output - -import spacy -from spacy.tokens import DocBin -from spacy.training import offsets_to_biluo_tags -import en_fetch_ner_spacy_tsf -nlp = en_fetch_ner_spacy_tsf.load() -# clear_output() - -import nltk -nltk.download('stopwords') - -from nltk.corpus import stopwords -stop_words = set(stopwords.words('english')) -additional_stop_words = {'pack'} -stop_words.update(additional_stop_words) -# clear_output() - - -# load operation data -path1 = "data/brand_belong_category_dict.json" -path2 = "data/product_upper_category_dict.json" -path3 = 
"data/offered_brands.pkl" -path4 = "data/offer_retailer.csv" - -with open(path1, 'r') as f: - brand_belong_category_dict = json.load(f) - -with open(path2, 'rb') as f: - category_dict = json.load(f) - -with open(path3, 'rb') as f: - offered_brands = pickle.load(f) - -df_offers_brand_retailer = pd.read_csv(path4) - -example_search = "Simply Spiked Lemonade 12 pack at Walmart" - -# helper functions - -def single_text_cleaner(text: str, remove_stopwords: bool=False, upper_case: bool = False, remove_punctuation: bool=True) -> str: - """Clean one single text input. By default it will convert text to lower case""" - if upper_case: - text = text.upper() - else: - text = text.lower() - if remove_punctuation: - text = re.sub(r'[^a-z\s]', '', text) - if remove_stopwords: - words = text.split() - words = [word for word in words if word not in stop_words] - text = ' '.join(words) - return text - -def list_text_cleaner(texts: List[str], upper_case: bool = False, remove_stopwords: bool = False, remove_punctuation: bool=True) -> List[str]: - """Takes in a list of strings and returns a list of cleaned strings without stop words. - Current tasks: - - remove non-alphabetical characters - - converting to lower cases - - remove stop words (optional)""" - cleaned_texts = [single_text_cleaner(text, remove_stopwords, upper_case, remove_punctuation) for text in texts] - return cleaned_texts - -def match_product_category(s1: list[str], s2: list[str]) -> str: - """Find if items of a list is in one list of product categories""" - return next((p for c in s1 for p in s2 if c in p), None) # this will stop after finding first match, which saves time - -def find_category(search_input: str, search_dict: Dict) -> str: - """Find the category of a search input based on a dictionary of categories - Args: - - search_input: a string - - search_dict: a dictionary of product categories - """ - search_list = list_text_cleaner(re.split(r'[,\s]+', search_input), remove_stopwords=True) - search_list = [c for c in search_list if len(c)>0] # sometimes there are empty strings - matched_category = False - for k, v in search_dict.items(): - v = list_text_cleaner(v, remove_punctuation=False) - search_results = match_product_category(search_list, v) - if search_results is not None: - matched_category = True - return k, search_results - else: - # print(f'Function find_category: No category {k} has matched for input: {search_input}') - continue - if not matched_category: - print(f'Function find_category: No category has matched for input: {search_input}') - return None - - -def check_entity(search_input) -> bool: - """Takes in a search input and checks if it contains any entities""" - doc = nlp(search_input) - if len(doc.ents) > 0: - return doc - else: - return False - -def get_cosine_sim(input_text: str, texts: List[str]) -> pd.DataFrame: - """Calculate the cosine similarity of the input text against a list of texts - Takes in: - - input_text: a string - - texts: a list of strings - Returns a dataframe with two columns: Sentence Text and Cosine Similarity Score - """ - input_text_cleaned = list_text_cleaner([input_text], remove_stopwords=True)[0] - cleaned_texts = list_text_cleaner(texts, remove_stopwords=True) - all_texts = [input_text_cleaned] + cleaned_texts - vectors = get_vectors(*all_texts) - sim_matrix = cosine_similarity(vectors) - # Get the similarity scores of the input_text against all other texts - sim_scores = sim_matrix[0, 1:] - data = {'OFFER': texts, 'Cosine Similarity Score': sim_scores} - df = pd.DataFrame(data) - df = 
df.sort_values(by='Cosine Similarity Score', ascending=False).reset_index(drop=True) - return df - -def get_vectors(*strs: str) -> np.ndarray: - text = list(strs) - vectorizer = CountVectorizer() - vectorizer.fit(text) - return vectorizer.transform(text).toarray() - -def jaccard_similarity(s1: List[str], s2: List[str]) -> float: - """Takes in two lists and returns the Jaccard similarity score (3 digits)""" - intersection = set(s1).intersection(set(s2)) - n = len(intersection) - score = round(n / (len(s1) + len(s2) - n), 3) - return score - -def get_jaccard_sim(input_text: str, texts: List[str]) -> pd.DataFrame: - """Calculate the Jaccard similarity of the input text against a list of texts - Takes in: - - input_text: a string - - texts: a list of strings - Returns a dataframe with two columns: Sentence Text and Jaccard Similarity Score - """ - cleaned_input_text = list_text_cleaner([input_text], remove_stopwords=True)[0].split() - cleaned_texts = list_text_cleaner(texts, remove_stopwords=True) - - jaccard_scores = [jaccard_similarity(cleaned_input_text, text.split()) for text in cleaned_texts] - - data = {'OFFER': texts, 'Jaccard Similarity Score': jaccard_scores} - df = pd.DataFrame(data) - # sort based on the similarity score - df = df.sort_values(by='Jaccard Similarity Score', ascending=False).reset_index(drop=True) - return df - -def find_column(df: pd.DataFrame, keyword: str) -> str: - """Function to find the first column containing a specific keyword. Note that we assume there will only be one score at most for a similarity score dataframe""" - cols = [col for col in df.columns if keyword.lower() in col.lower()] - return cols[0] if cols else None - -def extract_similar_offers(data: pd.DataFrame, threshold: float = 0.0) -> pd.DataFrame: - """Takes in the results from get_cosine_sim() and get_jaccard_sim(); returns a dataframe of similar offers with scores > threshold""" - score = find_column(data, 'score') - similar_offers = data[data[score] >= threshold] - similar_offers[score] = similar_offers[score].apply(lambda x: round(x, 3)) # round to 3 digits - return similar_offers - -def category_to_brand(category: str, offered_brands: List, brand_belong_category_dict: Dict) -> List[str]: - """Use case: when a user searches for a category, we return a list of brands in that category""" - # checks if the category is in the dictionary keys - if category.upper() in brand_belong_category_dict.keys(): - search_brands = brand_belong_category_dict[category.upper()] # becase all keys are in upper case - result = list(set(search_brands) & set(offered_brands)) - print(f"Function category_to_brand | Found {category} in offered brand") - return result - else: - print(f"Function category_to_brand | No offered brand is found in {category}") - return None - -class CatchErros(Exception): - class ParamsInputError(Exception): - pass - class SearchFailedError(Exception): - pass - class UnknownError(Exception): - pass - - -def offer_finder_by_category(search_input: str, search_category_tuple: Tuple, category_dict: Dict, offers: pd.DataFrame, offered_brands: List, - brand_belong_category_dict: Dict, score: str, threshold: float = 0.0) -> pd.DataFrame: - """Find offers based on a category identified from search input. - Args: - - search_input: a string - - search_category_tuple: a tuple of (upper_category, product_category) - - category_dict: a dictionary of categories. 
Keys are upper categories and values are lists of product categories - - offers: a dataframe of offers (OFFER, BRAND, RETAILER) that are avaialble in our database - - offered_brands: a list of offers from offer.df - - brand_belong_category_dict: a dictionary of brands and the categories they belong to - - score: a string of either 'cosine' or 'jaccard' - - threshold: a float between 0 and 1 - - Returns a dataframe of similar offers, ordered by highest score - """ - # we assume people just search one category at a time - # search_category_tuple = find_category(search_input, category_dict) - product_category, upper_category = search_category_tuple[1], search_category_tuple[0] # ('Alcohol', 'beer') - print(f'Function offer_finder_by_category | Found items:\n- Search input: {search_input}\n- Product category: {product_category}\n- Upper category: {upper_category}') - potential_brands = category_to_brand(product_category, offered_brands, brand_belong_category_dict) - if potential_brands is not None: - potential_offers = offers[offers['BRAND'].isin(potential_brands)]['OFFER'].tolist() - if score == 'cosine': - cos_sim_score = get_cosine_sim(search_input, potential_offers) - output = extract_similar_offers(cos_sim_score, threshold) - elif score == 'jaccard': - jaccard_sim_score = get_jaccard_sim(search_input, potential_offers) - output = extract_similar_offers(jaccard_sim_score, threshold) - elif score not in ['cosine', 'jaccard']: - raise ParamsInputError(f'Please enter a valid score: cosine or jaccard; Not {score}') - else: # this means something else is worng - raise UnknownError(f'Something must be broken. Please try again.') - return output - else: - potential_product_categories = category_dict[upper_category] - msg = f'{product_category} is not found. Do you wanna take a look at these similar offers in {upper_category}?\n We have: {potential_product_categories}' # we can still calculate similarity but this is computationally expensive - print(msg) - return None - -def offer_finder_by_entity(search_input: str, entities: Tuple, offers_data: pd.DataFrame, score: str, threshold: float=0.0) -> pd.DataFrame: - """Find offers based on entities identified from search input. 
- Args: - - search_input: a string - - entities: a tuple of entities - - offers_data: a dataframe of offers (OFFER, BRAND, RETAILER) that are avaialble in our database - - score: a string of either 'cosine' or 'jaccard' - - threshold: a float between 0 and 1 - - Returns a dataframe of similar offers, ordered by highest score - """ - collects = [] # collect all the results if there are more than one entity - for ent in entities: - ent_name, ent_label = ent.text, ent.label_ - print(f'Function offer_finder_by_entity | Found entity: {ent_name} with label: {ent_label}') - # filter offers by entity - df_tmp = offers_data[offers_data[ent_label.upper()] == ent_name.upper()] - if df_tmp.shape[0] > 0: - print(f'Function offer_finder_by_entity | Found {df_tmp.shape[0]} offer(s) for the brand/retailer: {ent_name}') - potential_offers = df_tmp['OFFER'].drop_duplicates().tolist() - if score == 'cosine': - cos_sim_score = get_cosine_sim(search_input, potential_offers) - output = extract_similar_offers(cos_sim_score, threshold) - elif score == 'jaccard': - jaccard_sim_score = get_jaccard_sim(search_input, potential_offers) - output = extract_similar_offers(jaccard_sim_score, threshold) - elif score not in ['cosine', 'jaccard']: - raise ValueError(f'Please enter a valid score: cosine or jaccard; Not {score}') - else: # this means something else is worng - raise UnknownError(f'Something must be broken. Please try again.') - collects.append(output) - else: - print(f'Function offer_finder_by_entity | No offer is found for the brand/retailer: {ent_name}') - - if len(collects) > 0: - final_output = pd.concat(collects, ignore_index=True)# they should be using the same similarity score - score = find_column(collects[0], 'score') - final_output = final_output.sort_values(by=score, ascending=False).reset_index(drop=True) # sort final_output by score - return final_output - elif len(collects) == 1: - return collects[0] - else: - print('###'*5 + 'FINAL SEARCH RESULTS' + '###'*5) - print('Function offer_finder_by_entity | No offer is found for any of the entities.') - return None - - -def search_offers(search_input: str=example_search, offers: pd.DataFrame=df_offers_brand_retailer, offer_brands: List=offered_brands, - category_dict: Dict=category_dict, brand_belong_category_dict: Dict=brand_belong_category_dict, - score: str="jaccard", score_threshold: float = 0.0): - """Main function. Takes in a serach_input and decide whether it can find entities or not. Then excecute the appropriate functions - Inputs: - - search_input: a string that a user enters - - offers: a dataframe of offers (OFFER, BRAND, RETAILER) that are avaialble in our database - - category_dict: a dictionary of categories. Keys are upper categories and values are lists of product categories - - brand_belong_category_dict: a dictionary of brands and the categories they belong to - - score: a string of either 'cosine' or 'jaccard' - - score_threshold: a float between 0 and 1 - - Returns a dataframe of similar offers, ordered by highest score - """ - print(f'Function main | Search input: {search_input}') - check_ent = check_entity(search_input) - if not check_entity(search_input): # no entities found - # check category - cat_check = find_category(search_input, category_dict) - if cat_check is None: - print('No brand/retailer/category is found. 
Please try again.') - return None - else: - # we assume people just search one category at a time - cat_tuple = cat_check # ('Alcohol', 'beer') - search_results = offer_finder_by_category(search_input, cat_tuple, category_dict, offers, offered_brands, brand_belong_category_dict, score, score_threshold) - return search_results - else: - entities = check_ent.ents # entities will be a tuple anyways - print(f'Found {len(entities)} entity object(s) in the search input.') - search_results = offer_finder_by_entity(search_input, entities, offers, score, score_threshold) - if search_results is None: - print('No offers matched retailer/category is found. Now trying to recommend based on category.') - cat_check = find_category(search_input, category_dict) - if cat_check is None: - print('No brand/retailer/category is found. Please try again.') - return None - else: - cat_tuple = cat_check - search_results = offer_finder_by_category(search_input, cat_tuple, category_dict, offers, offered_brands, brand_belong_category_dict, score, score_threshold) - return search_results - -if __name__ == "__main__": - search_offers() \ No newline at end of file diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_late_0.sh b/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_late_0.sh deleted file mode 100644 index e56682f652b9e81478b4561c019ee69a01f75b69..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_late_0.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 --gres=gpu:1 --time=24:00:00 -#SBATCH --job-name=Task503_glacier_mtl_late_0 - -export data_raw="/home/woody/iwi5/iwi5039h/data_raw" -export nnUNet_raw_data_base="/home/woody/iwi5/iwi5039h/nnUNet_data/nnUNet_raw_data_base/" -export nnUNet_preprocessed="/home/woody/iwi5/iwi5039h/nnUNet_data/nnUNet_preprocessed/" -export RESULTS_FOLDER="/home/woody/iwi5/iwi5039h/nnUNet_data/RESULTS_FOLDER" - -cd nnunet_glacer -pwd -conda activate nnunet - -#python3 nnunet/dataset_conversion/Task503_Glacier_mtl.py -data_percentage 100 -base $data_raw -#python3 nnunet/experiment_planning/nnUNet_plan_and_preprocess.py -t 503 -pl3d None -pl2d ExperimentPlanner2D_mtl - -python3 nnunet/run/run_training.py 2d nnUNetTrainerMTLlate 503 0 -p nnUNetPlans_mtl --disable_postprocessing_on_folds -python3 nnunet/inference/predict_simple.py -i $nnUNet_raw_data_base/nnUNet_raw_data/Task503_Glacier_mtl/imagesTs -o $RESULTS_FOLDER/test_predictions/Task503_Glacier_mtl_late/fold_0 -t 503 -m 2d -f 0 -p nnUNetPlans_mtl -tr nnUNetTrainerMTLlate -python3 nnunet/dataset_conversion/Task503_Glacier_mtl_reverse.py -i $RESULTS_FOLDER/test_predictions/Task503_Glacier_mtl_late/fold_0 -python3 ./evaluate_nnUNet.py --predictions $RESULTS_FOLDER/test_predictions/Task503_Glacier_mtl_late/fold_0/pngs --labels_fronts $data_raw/fronts/test --labels_zones $data_raw/zones/test --sar_images $data_raw/sar_images/test diff --git a/spaces/huaiji3y/bingo-Public/src/state/index.ts b/spaces/huaiji3y/bingo-Public/src/state/index.ts deleted file mode 100644 index 272106d619c69f124ea8dd0f10872ab4840152d7..0000000000000000000000000000000000000000 --- a/spaces/huaiji3y/bingo-Public/src/state/index.ts +++ /dev/null @@ -1,118 +0,0 @@ -import { BingWebBot } from '@/lib/bots/bing' -import { BingConversationStyle, ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { nanoid } from '@/lib/utils' -import { atom } from 'jotai' -import { atomWithImmer } from 'jotai-immer' -import { 
atomWithStorage } from 'jotai/utils' -import { atomFamily } from 'jotai/utils' -import { atomWithHash, atomWithLocation } from 'jotai-location' - -const initialMessages: ChatMessageModel[] = [ - { author: 'system', text: 'conversation between user and robot', id: '1' }, - { author: 'user', text: '销量最高的 3 种宠物吸尘器有哪些优点和缺点? ', id: '2' }, - { - author: 'bot', text: ` -您好,这是必应。根据网上的搜索结果,我为您找到了以下三款销量最高的宠物吸尘器,以及它们的优点和缺点: - -- **戴森Dyson V10轻量版 Digital Slim Fluffy无线吸尘器**[^1^] [^3^]:这款吸尘器专为宠物家庭设计,拥有强大吸力和多功能吸头,能够有效清除宠物毛发和灰尘。 - - 优点:轻便易用,续航时间长,过滤系统先进,能够有效去除过敏原; - - 缺点:价格较高,噪音较大,需要定期清洁滤网。 -- **飞利浦FC6908/82(W7)无线吸尘器**[^2^] [^4^]:这是一款手持式干吸无线吸尘器,专门为宠物家庭设计。它具有75分钟的超高续航,适合大户型;除螨涡轮刷,可以深度清洁床单被罩,免受螨虫侵害;吸拖一体设计,280ml大水箱,还可以往水箱里加入清洁剂。 - - 优点:清洁性能强劲,操作方便,适用多种场景; - - 缺点:噪音较大,达到84dB(A);尘杯容量较小,只有0.6L;充电时长较长,需要5小时。 -- **必胜(Bissell) 宠物吸尘器**[^2^]:这是一款便携式手持真空吸尘器,专为清理宠物毛发而设计。它配备特殊的橡胶喷嘴和软毛刷头,可以轻松拾取宠物毛发和灰尘;几乎静音运行。 - - 优点:轻巧易用,重量只有1.36kg;电池可充电可更换; - - 缺点:续航时间不稳定,在15-30分钟之间浮动;吸力不够强劲,在地毯或沙发上可能效果不佳;没有过滤系统或过滤网。 - -希望这些信息对您有所帮助。如果您还有其他问题,请随时提问。😊 - `, id: '3' }, - { author: 'user', text: '今天的新闻', id: '4' }, - { - author: 'bot', text: `你好,这是Bing。我根据你的搜索查询“今天的新闻”为你找到了以下信息: - - # 中国新闻 - - 《求是》杂志发表习近平总书记重要文章《深化党和国家机构改革 推进国家治理体系和治理能力现代化》[^1^] - - 梦之队第5金! 全红婵陈芋汐女双10米台夺冠[^1^] - - 中央气象台7月16日18时发布台风橙色预警[^1^] - - 贵州石阡:暑期旅游带动乡村振兴[^1^] - - 激活大科学装置的“人才红利”[^1^] - - 聚才用才留才 让希望的田野成为智慧的田野[^1^] - - 科学家发现粮食系统增效可有效支撑我国碳中和[^1^] - - 成都以赛为媒提升城市美誉度[^1^] - - 香江观澜:“抢人才”成效显著 香港竞争力依旧[^1^] - - 浙江建德:新安江上享清凉[^1^] - - 定向资金持续精准滴灌 高质量发展力量不断积累[^1^] - - 企业信心恢复加大中长期投资 为稳增长奠定基础[^1^] - - 武汉渡江节开赛 千名“泳士”竞渡长江[^1^] - - 金融更加重视对实体经济支持 降低融资成本提振市场信心[^1^] - - 大运来了丨成都迎大运 全民健身动起来[^1^] - - 藏在高校里的宝藏博物馆[^1^] - - 中国汽车工业用70年赢得三个“全球第一”[^1^] - - 辽宁阜新市委常委、副市长赖化平涉嫌严重违纪违法接受审查调查[^1^] - - # 国际新闻 - - 联合国秘书长:可持续发展面临风险 各国需加强行动[^2^] - - 国际航运业加快绿色转型[^2^] - - 美企反对收紧对华芯片出口限制[^2^] - - 欧洲加大气候科技领域投资[^2^] - - 中企助力丹麦发展清洁能源[^2^] - - 中国代表呼吁国际社会共同努力防止乌克兰局势失控[^2^] - - 中国和阿尔及利亚共同构建新型国际关系典范[^2^] - - 以上信息仅供参考,具体详情请点击以下链接查看: - - [1] [中国新闻_央视网(cctv.com)](https://news.cctv.com/china/) - [2] [人民网_网上的人民日报](http://www.people.com.cn/)`, id: '5' }, - { author: 'user', text: '写一个快排', id: '6' }, - { author: 'bot', text: "你好,这是Bing。我可以帮你用python写一个快排。快速排序是一种基于分治思想的排序算法,它的基本思想是:从待排序的数组中选择一个基准元素,将数组分为两个子数组,使得左边的子数组中的元素都小于或等于基准元素,右边的子数组中的元素都大于或等于基准元素,然后对两个子数组递归地进行快速排序,最后合并结果。下面是用python实现的快速排序的代码:\n\n```python\n# 定义一个函数,用于交换数组中的两个元素\ndef swap(arr, i, j):\n temp = arr[i]\n arr[i] = arr[j]\n arr[j] = temp\n\n# 定义一个函数,用于对数组的一段区间进行划分\ndef partition(arr, low, high):\n # 选择最右边的元素作为基准元素\n pivot = arr[high]\n # 初始化一个指针,指向左边区间的最后一个位置\n i = low - 1\n # 遍历左边区间中的每个元素\n for j in range(low, high):\n # 如果当前元素小于或等于基准元素\n if arr[j] <= pivot:\n # 将指针右移一位\n i += 1\n # 将指针所指的元素与当前元素交换\n swap(arr, i, j)\n # 将基准元素与指针右边的第一个元素交换\n swap(arr, i + 1, high)\n # 返回基准元素的最终位置\n return i + 1\n\n# 定义一个函数,用于对数组进行快速排序\ndef quick_sort(arr, low, high):\n # 如果区间长度大于1\n if low < high:\n # 对区间进行划分,得到基准元素的位置\n p = partition(arr, low, high)\n # 对左边子区间进行快速排序\n quick_sort(arr, low, p - 1)\n # 对右边子区间进行快速排序\n quick_sort(arr, p + 1, high)\n\n# 测试代码\narr = [5, 3, 7, 2, 9, 1, 4, 6, 8]\nprint(\"原始数组:\", arr)\nquick_sort(arr, 0, len(arr) - 1)\nprint(\"排序后的数组:\", arr)\n```", id: '7' }, - { - author: 'bot', text: "好的,我会尝试画一只猫。\n > 
![一只猫](https://tse2.mm.bing.net/th/id/OIG.jz34V0PNVkPC229h9spV?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse1.mm.bing.net/th/id/OIG.6g7d.XLZMP_iwAByLhvo?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse2.mm.bing.net/th/id/OIG.iAxF4ekekYn7sZw9SmU6?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse4.mm.bing.net/th/id/OIG.qDnzeSKzUCeJcrBqc5mX?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)", - id: '8' - } -] - -export const GreetMessages = [ - '谢谢你! 知道你什么时候准备好继续前进总是很有帮助的。我现在能为你回答什么问题?', - '重新开始总是很棒。问我任何问题!', - '当然,我很乐意重新开始。我现在可以为你提供哪些帮助?', - '当然,我已准备好进行新的挑战。我现在可以为你做什么?', - '很好,让我们来更改主题。你在想什么?', - '不用担心,我很高兴尝试一些新内容。我现在可以为你回答什么问题?', - '好的,我准备好了!感谢重置。我们应该了解哪些内容?', - '感谢刷新!你有新的话题吗?', - '明白了,让我们重新开始。接下来应该讨论什么?', - '下一步!我可以为你做什么?', - '好的,我已准备好新话题。我们应该一起了解哪些内容?' -] - -export const bingConversationStyleAtom = atomWithStorage('bingConversationStyle', BingConversationStyle.Creative, undefined, { unstable_getOnInit: true }) -export const voiceAtom = atomWithStorage('enableTTS', false, undefined, { unstable_getOnInit: true }) - -type Param = { botId: BotId; page: string } - -const createBotInstance = () => { - return new BingWebBot({ - cookie: ' ', - ua: ' ', - }) -} - -export const chatFamily = atomFamily( - (param: Param) => { - return atomWithImmer({ - botId: param.botId, - bot: createBotInstance(), - messages: [] as ChatMessageModel[], - generatingMessageId: '', - abortController: undefined as AbortController | undefined, - conversationId: nanoid(), - }) - }, - (a, b) => a.botId === b.botId && a.page === b.page, -) - -export const hashAtom = atomWithHash('dialog', '') - -export const locationAtom = atomWithLocation() - -export const voiceListenAtom = atom(false) diff --git a/spaces/hzy123/bingo/src/components/chat.tsx b/spaces/hzy123/bingo/src/components/chat.tsx deleted file mode 100644 index a37ab1cc96ca2e6bfd9acbe313a8d946bfd5c3d4..0000000000000000000000000000000000000000 --- a/spaces/hzy123/bingo/src/components/chat.tsx +++ /dev/null @@ -1,93 +0,0 @@ -'use client' - -import { useCallback, useEffect, useMemo, useState } from 'react' -import { useAtom } from 'jotai' -import Image from 'next/image' -import { cn } from '@/lib/utils' -import { ChatList } from '@/components/chat-list' -import { ChatPanel } from '@/components/chat-panel' -import { WelcomeScreen } from '@/components/welcome-screen' -import { ChatScrollAnchor } from '@/components/chat-scroll-anchor' -import { ToneSelector } from './tone-selector' -import { ChatHeader } from './chat-header' -import { ChatSuggestions } from './chat-suggestions' -import { bingConversationStyleAtom } from '@/state' -import { ButtonScrollToBottom } from '@/components/button-scroll-to-bottom' -import StopIcon from '@/assets/images/stop.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { ChatMessageModel } from '@/lib/bots/bing/types' -import { ChatNotification } from './chat-notification' -import { Settings } from './settings' -import { ChatHistory } from './chat-history' - -export type ChatProps = React.ComponentProps<'div'> & { initialMessages?: ChatMessageModel[] } - -export default function Chat({ className }: ChatProps) { - - const [bingStyle, setBingStyle] = useAtom(bingConversationStyleAtom) - const { - messages, - sendMessage, - resetConversation, - stopGenerating, - setInput, - bot, - input, - generating, - isSpeaking, - uploadImage, - attachmentList, - setAttachmentList, - } = useBing() - - useEffect(() => { - window.scrollTo({ - top: document.body.offsetHeight, - behavior: 'smooth' - }) - }, []) - - return ( -
      - -
      - - - - {messages.length ? ( - <> - - - - - - {generating ? ( -
      - -
      - ) : null} - - ) : null} -
      - - -
      - ) -} diff --git a/spaces/ibaiGorordo/hugging-face-me/README.md b/spaces/ibaiGorordo/hugging-face-me/README.md deleted file mode 100644 index 6575a95a2079f79b3703959a97df898602568f81..0000000000000000000000000000000000000000 --- a/spaces/ibaiGorordo/hugging-face-me/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Hugging Face Me -emoji: 🤗 -colorFrom: gray -colorTo: indigo -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/inamXcontru/PoeticTTS/Balle Balle! From Amritsar to L A How Aishwarya Rai Found Love in Hollywood.md b/spaces/inamXcontru/PoeticTTS/Balle Balle! From Amritsar to L A How Aishwarya Rai Found Love in Hollywood.md deleted file mode 100644 index b12cf1ad75a063e58e46ab44c53fc7f26e977f4b..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Balle Balle! From Amritsar to L A How Aishwarya Rai Found Love in Hollywood.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Balle Balle! From Amritsar to L A hindi dubbed full movie


      DOWNLOAD ✫✫✫ https://gohhs.com/2uz3Se



      -
      - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/innnky/nyaru4.0/hubert/hubert_model.py b/spaces/innnky/nyaru4.0/hubert/hubert_model.py deleted file mode 100644 index 7fb642d89b07ca60792debab18e3454f52d8f357..0000000000000000000000000000000000000000 --- a/spaces/innnky/nyaru4.0/hubert/hubert_model.py +++ /dev/null @@ -1,222 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - x, mask = self.encode(x) - x = self.proj(x) - logits = self.logits(x) - return logits, mask - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - @torch.inference_mode() - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) 
- return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. 
- Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/innovatorved/whisper.api/app/api/__init__.py b/spaces/innovatorved/whisper.api/app/api/__init__.py deleted file mode 100644 index 5d756c3eb5a49597308ec0c290954f602e62a973..0000000000000000000000000000000000000000 --- a/spaces/innovatorved/whisper.api/app/api/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# File: whisper.api/app/api/__init__.py - -from fastapi import APIRouter -from .endpoints import users, transcribe - -api_router = APIRouter() - -api_router.include_router(users.router, prefix="/users", tags=["users"]) -api_router.include_router(transcribe.router, prefix="/transcribe", tags=["transcribe"]) diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Autocad Civil 3d 2013 32 Bit Torrent Download __TOP__.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Autocad Civil 3d 2013 32 Bit Torrent Download __TOP__.md deleted file mode 100644 index 4b6981049975266da58343faa73c75dc5fed6519..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Autocad Civil 3d 2013 32 Bit Torrent Download __TOP__.md +++ /dev/null @@ -1,6 +0,0 @@ -

      autocad civil 3d 2013 32 bit torrent download


Download Zip: https://urlin.us/2uEyxV



      - -Navisworks Freedom software (by Autodesk) is a (light) visualiser of 3D models, widely used ... 64bit free navisworks simulate 2011 user guide autodesk maya 2013 x64. ... Using BitTorrent is legal, downloading copyrighted material isn. ... (Choose Autodesk Navisworks Manage 2012 32bit price, unless otherwise sleeping. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/AutoCADPID201264bitdownloadtorrent ((NEW)).md b/spaces/inreVtussa/clothingai/Examples/AutoCADPID201264bitdownloadtorrent ((NEW)).md deleted file mode 100644 index 65d44520d218473e5b00d700856a5f7109ad5439..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/AutoCADPID201264bitdownloadtorrent ((NEW)).md +++ /dev/null @@ -1,32 +0,0 @@ -
      -

      How to Download and Install AutoCAD PID 2012 64 bit for Free

      -

      AutoCAD PID is a software application that allows you to create, modify and manage piping and instrumentation diagrams (P&IDs) for process plants. It is part of the Autodesk AutoCAD suite of products, which also includes AutoCAD, AutoCAD Architecture, AutoCAD Mechanical and more.

      -

      If you are looking for a way to download and install AutoCAD PID 2012 64 bit for free, you have come to the right place. In this article, we will show you how to use a torrent file to get the full version of AutoCAD PID 2012 64 bit without paying anything. We will also provide you with some tips and tricks to optimize your performance and avoid any potential issues.

      -

      AutoCADPID201264bitdownloadtorrent


Download: https://tiurll.com/2uCivI



      -

      What is a Torrent File?

      -

      A torrent file is a small file that contains information about a larger file or a group of files that can be downloaded from the internet. It does not contain the actual data of the files, but rather the metadata, such as the file names, sizes, locations and checksums. A torrent file also contains information about the peers or sources that have the files or parts of them.

      -

      To download a torrent file, you need a torrent client, which is a software program that can read the torrent file and connect you to the peers that have the files you want. Some of the most popular torrent clients are uTorrent, BitTorrent, qBittorrent and Vuze.
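The metadata in a torrent (file names, sizes and checksums) is also what lets you verify that a download completed correctly. The original article gives no code for this, so the snippet below is only a minimal Python sketch: it assumes the download page publishes a SHA-256 hash for each file, and the file name and hash shown are placeholders you would replace with your own.

```python
# Minimal sketch (not from the original article): compare a downloaded file
# against a checksum published on the download page. The file name and the
# expected hash are placeholders -- substitute the real values.
import hashlib
from pathlib import Path


def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


if __name__ == "__main__":
    downloaded = Path("acd2012_x64.part1.rar")   # placeholder file name
    expected = "0123abcd..."                     # placeholder hash from the site
    actual = sha256_of(downloaded)
    print("OK" if actual == expected else f"Checksum mismatch: {actual}")
```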

      -

      How to Download AutoCAD PID 2012 64 bit Torrent File?

      -

      To download AutoCAD PID 2012 64 bit torrent file, you need to find a reliable website that hosts it. There are many websites that offer torrent files for various software applications, but not all of them are safe and trustworthy. Some of them may contain malware, viruses or fake files that can harm your computer or compromise your privacy.

      -

      One of the websites that we recommend for downloading AutoCAD PID 2012 64 bit torrent file is allpcworlds.com[^1^]. This website provides high-quality torrent files for various software applications, including Autodesk products. It also has a user-friendly interface and fast download speeds.

      -

      To download AutoCAD PID 2012 64 bit torrent file from allpcworlds.com, follow these steps:

      -

      -
        -
1. Go to https://allpcworlds.com/acd-2012-download/
2. Scroll down to the bottom of the page and click on the link that says "Download Autodesk AutoCAD 2012 free latest full version offline setup for Windows 32-bit and 64-bit."
3. Select the option that says "64-Bit Password: 123" and click on it.
4. You will be redirected to another page where you will see several links for different parts of the file. You need to download all six parts of the file.
5. Click on each link one by one and save them to your preferred location on your computer.
6. Once you have downloaded all six parts of the file, you need to extract them using a software program like WinRAR or 7-Zip.
7. After extracting the files, you will see a folder named "AUTODESK.AUTOCAD.V2012.WIN64-ISO", which contains an ISO file named "acd2012_x64.iso". This is the installer image that you need.
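Before extracting, it is worth confirming that all six parts actually arrived and are non-empty; a missing or zero-byte part is the usual reason WinRAR or 7-Zip reports a corrupt archive. This check is not part of the original article; the short Python sketch below assumes an `acd2012_x64.partN.rar` naming pattern and a Downloads folder, both of which are placeholders to adjust.

```python
# Minimal sketch (not from the original article): verify that all six archive
# parts are present and non-empty before extracting with WinRAR or 7-Zip.
# The folder and naming pattern are assumptions -- adjust them to the real files.
from pathlib import Path

download_dir = Path.home() / "Downloads"                      # placeholder folder
parts = [download_dir / f"acd2012_x64.part{i}.rar" for i in range(1, 7)]

missing = [p.name for p in parts if not p.exists() or p.stat().st_size == 0]
if missing:
    print("Re-download these parts before extracting:", ", ".join(missing))
else:
    print("All six parts present; safe to extract.")
```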
      -

      How to Install AutoCAD PID 2012 64 bit Using Torrent File?

      -

To install AutoCAD PID 2012 64 bit from the downloaded ISO file, you need to mount it using a software program like Daemon Tools or PowerISO. Mounting creates a virtual drive on your computer that behaves as if you had inserted a CD or DVD containing the software.
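On Windows 8 and later you do not strictly need a third-party tool, because Windows can mount ISO images natively (double-click the file, or use PowerShell's `Mount-DiskImage`). The snippet below is a sketch, not part of the original article, that drives that cmdlet from Python; the ISO path is a placeholder.

```python
# Minimal sketch (not from the original article): mount an ISO on Windows 8+
# without Daemon Tools or PowerISO by calling PowerShell's Mount-DiskImage.
# The ISO path is a placeholder -- point it at wherever you saved the image.
import subprocess
from pathlib import Path

iso_path = Path(r"C:\Downloads\acd2012_x64.iso")   # placeholder path

subprocess.run(
    ["powershell", "-NoProfile", "-Command", f'Mount-DiskImage -ImagePath "{iso_path}"'],
    check=True,
)
print(f"Mounted {iso_path.name}; open the new virtual drive and run setup.exe.")
```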

      -

To install AutoCAD PID 2012 64 bit, follow these steps:

      -
        -
1. Mount the ISO file using Daemon Tools or PowerISO.
2. Open the virtual drive and run the setup.exe file.
3. Follow the instructions on the screen and accept the terms and conditions.
4. When prompted for a serial

        d5da3c52bf
        -
        -
        \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Baidu Pc Faster Portable.md b/spaces/inreVtussa/clothingai/Examples/Baidu Pc Faster Portable.md deleted file mode 100644 index 4bcaa4cf3e0bfdf85c3d6d7881beb1528ec4b368..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Baidu Pc Faster Portable.md +++ /dev/null @@ -1,9 +0,0 @@ -
        -

After a week of use with the portable SSDs, we did not find any traces of dust, dirt, or other debris on them. The drives are made of plastic and metal, and they have a hard coating that prevents dust and dirt from sticking to the surface.

        -

        baidu pc faster portable


        Download · https://tiurll.com/2uClZA



        -

        the 2.5" x5 portable ssd is a solid choice for a portable ssd. its performance is comparable to the other ssds weve reviewed in this class. its 7,200rpm and its performance is great. however, its not the only option. the newer x7 model has been released with improved performance and a smaller, sleeker design. the x7 makes the x5 portable ssd more portable and has more storage capacity. however, the x5 portable ssd is priced lower and available for a lower price. the x7 is priced the same and is more portable.

        -

The M.2 X5 portable SSD and the X7 portable SSD are portable SSDs from SanDisk. With two models of M.2 and two models of USB-C, SanDisk has a wide range of choices for consumers. As noted above, the X5 is the solid, lower-priced choice, while the newer X7 offers improved performance, more storage capacity and a smaller, sleeker design at a similar price.


        The SanDisk Extreme Pro V2 is the top of the line of portable SSDs. It is a 2.5-inch drive with a SanDisk logo on the enclosure, and you can transfer and store data on it for many years. The Extreme Pro V2 has mechanical shock protection and is easy to use. It is also extremely fast, making it the best portable SSD drive on the market.


        899543212b
        \ No newline at end of file diff --git a/spaces/iqovocn/ChuanhuChatGPT/assets/html/appearance_switcher.html b/spaces/iqovocn/ChuanhuChatGPT/assets/html/appearance_switcher.html deleted file mode 100644 index 9375071fbdfda7bfd622d7f7bd2dfdd0c494341b..0000000000000000000000000000000000000000 --- a/spaces/iqovocn/ChuanhuChatGPT/assets/html/appearance_switcher.html +++ /dev/null @@ -1,11 +0,0 @@ -
        - - {label} - - - - -
        diff --git a/spaces/irvay/RVC_IR/lib/infer_pack/transforms.py b/spaces/irvay/RVC_IR/lib/infer_pack/transforms.py deleted file mode 100644 index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000 --- a/spaces/irvay/RVC_IR/lib/infer_pack/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if 
min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/jackyccl/segment-anything/groundingdino/util/get_tokenlizer.py b/spaces/jackyccl/segment-anything/groundingdino/util/get_tokenlizer.py deleted 
file mode 100644 index f7dcf7e95f03f95b20546b26442a94225924618b..0000000000000000000000000000000000000000 --- a/spaces/jackyccl/segment-anything/groundingdino/util/get_tokenlizer.py +++ /dev/null @@ -1,26 +0,0 @@ -from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast - - -def get_tokenlizer(text_encoder_type): - if not isinstance(text_encoder_type, str): - # print("text_encoder_type is not a str") - if hasattr(text_encoder_type, "text_encoder_type"): - text_encoder_type = text_encoder_type.text_encoder_type - elif text_encoder_type.get("text_encoder_type", False): - text_encoder_type = text_encoder_type.get("text_encoder_type") - else: - raise ValueError( - "Unknown type of text_encoder_type: {}".format(type(text_encoder_type)) - ) - print("final text_encoder_type: {}".format(text_encoder_type)) - - tokenizer = AutoTokenizer.from_pretrained(text_encoder_type) - return tokenizer - - -def get_pretrained_language_model(text_encoder_type): - if text_encoder_type == "bert-base-uncased": - return BertModel.from_pretrained(text_encoder_type) - if text_encoder_type == "roberta-base": - return RobertaModel.from_pretrained(text_encoder_type) - raise ValueError("Unknown text_encoder_type {}".format(text_encoder_type)) diff --git a/spaces/james-oldfield/PandA/networks/genforce/models/perceptual_model.py b/spaces/james-oldfield/PandA/networks/genforce/models/perceptual_model.py deleted file mode 100644 index 1f21496f7b52dbf102a197fda2a12b8e3770a87e..0000000000000000000000000000000000000000 --- a/spaces/james-oldfield/PandA/networks/genforce/models/perceptual_model.py +++ /dev/null @@ -1,148 +0,0 @@ -# python 3.7 -"""Contains the VGG16 model for perceptual feature extraction. - -This file is particularly used for computing perceptual loss and hence is highly -recommended to use with pre-trained weights. 
- -The PyTorch weights can be downloaded from - -https://drive.google.com/file/d/1qQ-r7MYZ8ZcjQQFe17eQfJbOAuE3eS0y/view?usp=sharing - -which is converted from the Keras model - -https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5 - -The variable mapping is shown below - -pth_to_tf_var_mapping = { - 'layer0.weight': 'block1_conv1_W_1:0', # [64, 3, 3, 3] - 'layer0.bias': 'block1_conv1_b_1:0', # [64] - 'layer2.weight': 'block1_conv2_W_1:0', # [64, 64, 3, 3] - 'layer2.bias': 'block1_conv2_b_1:0', # [64] - 'layer5.weight': 'block2_conv1_W_1:0', # [128, 64, 3, 3] - 'layer5.bias': 'block2_conv1_b_1:0', # [128] - 'layer7.weight': 'block2_conv2_W_1:0', # [128, 128, 3, 3] - 'layer7.bias': 'block2_conv2_b_1:0', # [128] - 'layer10.weight': 'block3_conv1_W_1:0', # [256, 128, 3, 3] - 'layer10.bias': 'block3_conv1_b_1:0', # [256] - 'layer12.weight': 'block3_conv2_W_1:0', # [256, 256, 3, 3] - 'layer12.bias': 'block3_conv2_b_1:0', # [256] - 'layer14.weight': 'block3_conv3_W_1:0', # [256, 256, 3, 3] - 'layer14.bias': 'block3_conv3_b_1:0', # [256] - 'layer17.weight': 'block4_conv1_W_1:0', # [512, 256, 3, 3] - 'layer17.bias': 'block4_conv1_b_1:0', # [512] - 'layer19.weight': 'block4_conv2_W_1:0', # [512, 512, 3, 3] - 'layer19.bias': 'block4_conv2_b_1:0', # [512] - 'layer21.weight': 'block4_conv3_W_1:0', # [512, 512, 3, 3] - 'layer21.bias': 'block4_conv3_b_1:0', # [512] - 'layer24.weight': 'block5_conv1_W_1:0', # [512, 512, 3, 3] - 'layer24.bias': 'block5_conv1_b_1:0', # [512] - 'layer26.weight': 'block5_conv2_W_1:0', # [512, 512, 3, 3] - 'layer26.bias': 'block5_conv2_b_1:0', # [512] - 'layer28.weight': 'block5_conv3_W_1:0', # [512, 512, 3, 3] - 'layer28.bias': 'block5_conv3_b_1:0', # [512] -} -""" - -import os -import warnings -from collections import OrderedDict -import numpy as np - -import torch -import torch.nn as nn - -__all__ = ['PerceptualModel'] - -_MEAN_STATS = (103.939, 116.779, 123.68) - - -class PerceptualModel(nn.Module): - """Defines the VGG16 structure as the perceptual network. - - This model takes `RGB` images with data format `NCHW` as the raw inputs, and - outputs the perceptual feature. This following operations will be performed - to preprocess the inputs to match the preprocessing during the model - training: - (1) Shift pixel range to [0, 255]. - (2) Change channel order to `BGR`. - (3) Subtract the statistical mean. - - NOTE: The three fully connected layers on top of the model are dropped. - """ - - def __init__(self, - output_layer_idx=23, - min_val=-1.0, - max_val=1.0, - pretrained_weight_path=None): - """Defines the network structure. - - Args: - output_layer_idx: Index of layer whose output will be used as the - perceptual feature. (default: 23, which is the `block4_conv3` - layer activated by `ReLU` function) - min_val: Minimum value of the raw input. (default: -1.0) - max_val: Maximum value of the raw input. (default: 1.0) - pretrained_weight_path: Path to the pretrained weights. 
- (default: None) - """ - super().__init__() - self.vgg16 = nn.Sequential(OrderedDict({ - 'layer0': nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1), - 'layer1': nn.ReLU(inplace=True), - 'layer2': nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), - 'layer3': nn.ReLU(inplace=True), - 'layer4': nn.MaxPool2d(kernel_size=2, stride=2), - 'layer5': nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), - 'layer6': nn.ReLU(inplace=True), - 'layer7': nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), - 'layer8': nn.ReLU(inplace=True), - 'layer9': nn.MaxPool2d(kernel_size=2, stride=2), - 'layer10': nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), - 'layer11': nn.ReLU(inplace=True), - 'layer12': nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), - 'layer13': nn.ReLU(inplace=True), - 'layer14': nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), - 'layer15': nn.ReLU(inplace=True), - 'layer16': nn.MaxPool2d(kernel_size=2, stride=2), - 'layer17': nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1), - 'layer18': nn.ReLU(inplace=True), - 'layer19': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), - 'layer20': nn.ReLU(inplace=True), - 'layer21': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), - 'layer22': nn.ReLU(inplace=True), - 'layer23': nn.MaxPool2d(kernel_size=2, stride=2), - 'layer24': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), - 'layer25': nn.ReLU(inplace=True), - 'layer26': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), - 'layer27': nn.ReLU(inplace=True), - 'layer28': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), - 'layer29': nn.ReLU(inplace=True), - 'layer30': nn.MaxPool2d(kernel_size=2, stride=2), - })) - self.output_layer_idx = output_layer_idx - self.min_val = min_val - self.max_val = max_val - self.mean = torch.from_numpy(np.array(_MEAN_STATS)).view(1, 3, 1, 1) - self.mean = self.mean.type(torch.FloatTensor) - - self.pretrained_weight_path = pretrained_weight_path - if os.path.isfile(self.pretrained_weight_path): - self.vgg16.load_state_dict( - torch.load(self.pretrained_weight_path, map_location='cpu')) - else: - warnings.warn('No pre-trained weights found for perceptual model!') - - def forward(self, x): - x = (x - self.min_val) * 255.0 / (self.max_val - self.min_val) - x = x.flip(1) # RGB to BGR - x = x - self.mean.to(x) - # TODO: Resize image? - for idx, layer in enumerate(self.vgg16.children()): - if idx == self.output_layer_idx: - break - x = layer(x) - # x = x.permute(0, 2, 3, 1) - x = x.flatten(start_dim=1) - return x diff --git a/spaces/jcenaa/Segment-Any-RGBD/CONTRIBUTING.md b/spaces/jcenaa/Segment-Any-RGBD/CONTRIBUTING.md deleted file mode 100644 index 7141f8d55f5d491525cf73b4958ff560f65e7a1a..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/CONTRIBUTING.md +++ /dev/null @@ -1,32 +0,0 @@ -# Contributing to OVSeg -We want to make contributing to this project as easy and transparent as -possible. - -## Pull Requests -We actively welcome your pull requests. - -1. Fork the repo and create your branch from `main`. -2. If you've added code that should be tested, add tests. -3. If you've changed APIs, update the documentation. -4. Ensure the test suite passes. -5. Make sure your code lints. -6. If you haven't already, complete the Contributor License Agreement ("CLA"). - -## Contributor License Agreement ("CLA") -In order to accept your pull request, we need you to submit a CLA. You only need -to do this once to work on any of Meta's open source projects. 
- -Complete your CLA here: - -## Issues -We use GitHub issues to track public bugs. Please ensure your description is -clear and has sufficient instructions to be able to reproduce the issue. - -Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe -disclosure of security bugs. In those cases, please go through the process -outlined on that page and do not file a public issue. - - -## License -By contributing to OVSeg, you agree that your contributions will be licensed -under the LICENSE file in the root directory of this source tree. diff --git a/spaces/jerichosy/DIGIMAP-Colorization-Web-App/colorizers/base_color.py b/spaces/jerichosy/DIGIMAP-Colorization-Web-App/colorizers/base_color.py deleted file mode 100644 index 00beb39e9f6f73b06ebea0314fc23a0bc75f23b7..0000000000000000000000000000000000000000 --- a/spaces/jerichosy/DIGIMAP-Colorization-Web-App/colorizers/base_color.py +++ /dev/null @@ -1,24 +0,0 @@ - -import torch -from torch import nn - -class BaseColor(nn.Module): - def __init__(self): - super(BaseColor, self).__init__() - - self.l_cent = 50. - self.l_norm = 100. - self.ab_norm = 110. - - def normalize_l(self, in_l): - return (in_l-self.l_cent)/self.l_norm - - def unnormalize_l(self, in_l): - return in_l*self.l_norm + self.l_cent - - def normalize_ab(self, in_ab): - return in_ab/self.ab_norm - - def unnormalize_ab(self, in_ab): - return in_ab*self.ab_norm - diff --git a/spaces/jinmao/2/modules/utils.py b/spaces/jinmao/2/modules/utils.py deleted file mode 100644 index 859363f58c47600b8c8af81bd4aa2e3b23346c03..0000000000000000000000000000000000000000 --- a/spaces/jinmao/2/modules/utils.py +++ /dev/null @@ -1,436 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type -import logging -import json -import os -import datetime -import hashlib -import csv -import requests -import re -import html - -import gradio as gr -from pypinyin import lazy_pinyin -import tiktoken -import mdtex2html -from markdown import markdown -from pygments import highlight -from pygments.lexers import get_lexer_by_name -from pygments.formatters import HtmlFormatter - -from modules.presets import * -import modules.shared as shared - -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -def count_token(message): - encoding = tiktoken.get_encoding("cl100k_base") - input_str = f"role: {message['role']}, content: {message['content']}" - length = len(encoding.encode(input_str)) - return length - - -def markdown_to_html_with_syntax_highlight(md_str): - def replacer(match): - lang = match.group(1) or "text" - code = match.group(2) - - try: - lexer = get_lexer_by_name(lang, stripall=True) - except ValueError: - lexer = get_lexer_by_name("text", stripall=True) - - formatter = HtmlFormatter() - highlighted_code = highlight(code, lexer, formatter) - - return f'
        {highlighted_code}
        ' - - code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" - md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) - - html_str = markdown(md_str) - return html_str - - -def normalize_markdown(md_text: str) -> str: - lines = md_text.split("\n") - normalized_lines = [] - inside_list = False - - for i, line in enumerate(lines): - if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): - if not inside_list and i > 0 and lines[i - 1].strip() != "": - normalized_lines.append("") - inside_list = True - normalized_lines.append(line) - elif inside_list and line.strip() == "": - if i < len(lines) - 1 and not re.match( - r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() - ): - normalized_lines.append(line) - continue - else: - inside_list = False - normalized_lines.append(line) - - return "\n".join(normalized_lines) - - -def convert_mdtext(md_text): - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) - code_blocks = code_block_pattern.findall(md_text) - non_code_parts = code_block_pattern.split(md_text)[::2] - - result = [] - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - non_code = normalize_markdown(non_code) - if inline_code_pattern.search(non_code): - result.append(markdown(non_code, extensions=["tables"])) - else: - result.append(mdtex2html.convert(non_code, extensions=["tables"])) - if code.strip(): - # _, code = detect_language(code) # 暂时去除代码高亮功能,因为在大段代码的情况下会出现问题 - # code = code.replace("\n\n", "\n") # 暂时去除代码中的空行,因为在大段代码的情况下会出现问题 - code = f"\n```{code}\n\n```" - code = markdown_to_html_with_syntax_highlight(code) - result.append(code) - result = "".join(result) - result += ALREADY_CONVERTED_MARK - return result - - -def convert_asis(userinput): - return f"

        {html.escape(userinput)}

        "+ALREADY_CONVERTED_MARK - -def detect_converted_mark(userinput): - if userinput.endswith(ALREADY_CONVERTED_MARK): - return True - else: - return False - - -def detect_language(code): - if code.startswith("\n"): - first_line = "" - else: - first_line = code.strip().split("\n", 1)[0] - language = first_line.lower() if first_line else "" - code_without_language = code[len(first_line) :].lstrip() if first_line else code - return language, code_without_language - - -def construct_text(role, text): - return {"role": role, "content": text} - - -def construct_user(text): - return construct_text("user", text) - - -def construct_system(text): - return construct_text("system", text) - - -def construct_assistant(text): - return construct_text("assistant", text) - - -def construct_token_message(token, stream=False): - return f"Token 计数: {token}" - -def delete_first_conversation(history, previous_token_count): - if history: - del history[:2] - del previous_token_count[0] - return ( - history, - previous_token_count, - construct_token_message(sum(previous_token_count)), - ) - - -def delete_last_conversation(chatbot, history, previous_token_count): - if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]: - logging.info("由于包含报错信息,只删除chatbot记录") - chatbot.pop() - return chatbot, history - if len(history) > 0: - logging.info("删除了一组对话历史") - history.pop() - history.pop() - if len(chatbot) > 0: - logging.info("删除了一组chatbot对话") - chatbot.pop() - if len(previous_token_count) > 0: - logging.info("删除了一组对话的token计数记录") - previous_token_count.pop() - return ( - chatbot, - history, - previous_token_count, - construct_token_message(sum(previous_token_count)), - ) - - -def save_file(filename, system, history, chatbot): - logging.info("保存对话历史中……") - os.makedirs(HISTORY_DIR, exist_ok=True) - if filename.endswith(".json"): - json_s = {"system": system, "history": history, "chatbot": chatbot} - print(json_s) - with open(os.path.join(HISTORY_DIR, filename), "w") as f: - json.dump(json_s, f) - elif filename.endswith(".md"): - md_s = f"system: \n- {system} \n" - for data in history: - md_s += f"\n{data['role']}: \n- {data['content']} \n" - with open(os.path.join(HISTORY_DIR, filename), "w", encoding="utf8") as f: - f.write(md_s) - logging.info("保存对话历史完毕") - return os.path.join(HISTORY_DIR, filename) - - -def save_chat_history(filename, system, history, chatbot): - if filename == "": - return - if not filename.endswith(".json"): - filename += ".json" - return save_file(filename, system, history, chatbot) - - -def export_markdown(filename, system, history, chatbot): - if filename == "": - return - if not filename.endswith(".md"): - filename += ".md" - return save_file(filename, system, history, chatbot) - - -def load_chat_history(filename, system, history, chatbot): - logging.info("加载对话历史中……") - if type(filename) != str: - filename = filename.name - try: - with open(os.path.join(HISTORY_DIR, filename), "r") as f: - json_s = json.load(f) - try: - if type(json_s["history"][0]) == str: - logging.info("历史记录格式为旧版,正在转换……") - new_history = [] - for index, item in enumerate(json_s["history"]): - if index % 2 == 0: - new_history.append(construct_user(item)) - else: - new_history.append(construct_assistant(item)) - json_s["history"] = new_history - logging.info(new_history) - except: - # 没有对话历史 - pass - logging.info("加载对话历史完毕") - return filename, json_s["system"], json_s["history"], json_s["chatbot"] - except FileNotFoundError: - logging.info("没有找到对话历史文件,不执行任何操作") - return filename, system, history, chatbot - - -def 
sorted_by_pinyin(list): - return sorted(list, key=lambda char: lazy_pinyin(char)[0][0]) - - -def get_file_names(dir, plain=False, filetypes=[".json"]): - logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}") - files = [] - try: - for type in filetypes: - files += [f for f in os.listdir(dir) if f.endswith(type)] - except FileNotFoundError: - files = [] - files = sorted_by_pinyin(files) - if files == []: - files = [""] - if plain: - return files - else: - return gr.Dropdown.update(choices=files) - - -def get_history_names(plain=False): - logging.info("获取历史记录文件名列表") - return get_file_names(HISTORY_DIR, plain) - - -def load_template(filename, mode=0): - logging.info(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)") - lines = [] - logging.info("Loading template...") - if filename.endswith(".json"): - with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f: - lines = json.load(f) - lines = [[i["act"], i["prompt"]] for i in lines] - else: - with open( - os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8" - ) as csvfile: - reader = csv.reader(csvfile) - lines = list(reader) - lines = lines[1:] - if mode == 1: - return sorted_by_pinyin([row[0] for row in lines]) - elif mode == 2: - return {row[0]: row[1] for row in lines} - else: - choices = sorted_by_pinyin([row[0] for row in lines]) - return {row[0]: row[1] for row in lines}, gr.Dropdown.update( - choices=choices, value=choices[0] - ) - - -def get_template_names(plain=False): - logging.info("获取模板文件名列表") - return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"]) - - -def get_template_content(templates, selection, original_system_prompt): - logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}") - try: - return templates[selection] - except: - return original_system_prompt - - -def reset_state(): - logging.info("重置状态") - return [], [], [], construct_token_message(0) - - -def reset_textbox(): - logging.debug("重置文本框") - return gr.update(value="") - - -def reset_default(): - newurl = shared.state.reset_api_url() - os.environ.pop("HTTPS_PROXY", None) - os.environ.pop("https_proxy", None) - return gr.update(value=newurl), gr.update(value=""), "API URL 和代理已重置" - - -def change_api_url(url): - shared.state.set_api_url(url) - msg = f"API地址更改为了{url}" - logging.info(msg) - return msg - - -def change_proxy(proxy): - os.environ["HTTPS_PROXY"] = proxy - msg = f"代理更改为了{proxy}" - logging.info(msg) - return msg - - -def hide_middle_chars(s): - if s is None: - return "" - if len(s) <= 8: - return s - else: - head = s[:4] - tail = s[-4:] - hidden = "*" * (len(s) - 8) - return head + hidden + tail - - -def submit_key(key): - key = key.strip() - msg = f"API密钥更改为了{hide_middle_chars(key)}" - logging.info(msg) - return key, msg - - -def sha1sum(filename): - sha1 = hashlib.sha1() - sha1.update(filename.encode("utf-8")) - return sha1.hexdigest() - - -def replace_today(prompt): - today = datetime.datetime.today().strftime("%Y-%m-%d") - return prompt.replace("{current_date}", today) - - -def get_geoip(): - response = requests.get("https://ipapi.co/json/", timeout=5) - try: - data = response.json() - except: - data = {"error": True, "reason": "连接ipapi失败"} - if "error" in data.keys(): - logging.warning(f"无法获取IP地址信息。\n{data}") - if data["reason"] == "RateLimited": - return ( - f"获取IP地理位置失败,因为达到了检测IP的速率限制。聊天功能可能仍然可用,但请注意,如果您的IP地址在不受支持的地区,您可能会遇到问题。" - ) - else: - return f"获取IP地理位置失败。原因:{data['reason']}。你仍然可以使用聊天功能。" - else: - country = data["country_name"] - if country == "China": - text = 
"**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**" - else: - text = f"您的IP区域:{country}。" - logging.info(text) - return text - - -def find_n(lst, max_num): - n = len(lst) - total = sum(lst) - - if total < max_num: - return n - - for i in range(len(lst)): - if total - lst[i] < max_num: - return n - i - 1 - total = total - lst[i] - return 1 - - -def start_outputing(): - logging.debug("显示取消按钮,隐藏发送按钮") - return gr.Button.update(visible=False), gr.Button.update(visible=True) - - -def end_outputing(): - return ( - gr.Button.update(visible=True), - gr.Button.update(visible=False), - ) - - -def cancel_outputing(): - logging.info("中止输出……") - shared.state.interrupt() - -def transfer_input(inputs): - # 一次性返回,降低延迟 - textbox = reset_textbox() - outputing = start_outputing() - return inputs, gr.update(value="") diff --git a/spaces/jlmarrugom/voice_fixer_app/voicefixer/tools/modules/fDomainHelper.py b/spaces/jlmarrugom/voice_fixer_app/voicefixer/tools/modules/fDomainHelper.py deleted file mode 100644 index 285433ccbeea0cb30851fddc74fcdd057982b110..0000000000000000000000000000000000000000 --- a/spaces/jlmarrugom/voice_fixer_app/voicefixer/tools/modules/fDomainHelper.py +++ /dev/null @@ -1,234 +0,0 @@ -from torchlibrosa.stft import STFT, ISTFT, magphase -import torch -import torch.nn as nn -import numpy as np -from voicefixer.tools.modules.pqmf import PQMF - -class FDomainHelper(nn.Module): - def __init__( - self, - window_size=2048, - hop_size=441, - center=True, - pad_mode="reflect", - window="hann", - freeze_parameters=True, - subband=None, - root="/Users/admin/Documents/projects/", - ): - super(FDomainHelper, self).__init__() - self.subband = subband - # assert torchlibrosa.__version__ == "0.0.7", "Error: Found torchlibrosa version %s. Please install 0.0.7 version of torchlibrosa by: pip install torchlibrosa==0.0.7." 
% torchlibrosa.__version__ - if self.subband is None: - self.stft = STFT( - n_fft=window_size, - hop_length=hop_size, - win_length=window_size, - window=window, - center=center, - pad_mode=pad_mode, - freeze_parameters=freeze_parameters, - ) - - self.istft = ISTFT( - n_fft=window_size, - hop_length=hop_size, - win_length=window_size, - window=window, - center=center, - pad_mode=pad_mode, - freeze_parameters=freeze_parameters, - ) - else: - self.stft = STFT( - n_fft=window_size // self.subband, - hop_length=hop_size // self.subband, - win_length=window_size // self.subband, - window=window, - center=center, - pad_mode=pad_mode, - freeze_parameters=freeze_parameters, - ) - - self.istft = ISTFT( - n_fft=window_size // self.subband, - hop_length=hop_size // self.subband, - win_length=window_size // self.subband, - window=window, - center=center, - pad_mode=pad_mode, - freeze_parameters=freeze_parameters, - ) - - if subband is not None and root is not None: - self.qmf = PQMF(subband, 64, root) - - def complex_spectrogram(self, input, eps=0.0): - # [batchsize, samples] - # return [batchsize, 2, t-steps, f-bins] - real, imag = self.stft(input) - return torch.cat([real, imag], dim=1) - - def reverse_complex_spectrogram(self, input, eps=0.0, length=None): - # [batchsize, 2[real,imag], t-steps, f-bins] - wav = self.istft(input[:, 0:1, ...], input[:, 1:2, ...], length=length) - return wav - - def spectrogram(self, input, eps=0.0): - (real, imag) = self.stft(input.float()) - return torch.clamp(real**2 + imag**2, eps, np.inf) ** 0.5 - - def spectrogram_phase(self, input, eps=0.0): - (real, imag) = self.stft(input.float()) - mag = torch.clamp(real**2 + imag**2, eps, np.inf) ** 0.5 - cos = real / mag - sin = imag / mag - return mag, cos, sin - - def wav_to_spectrogram_phase(self, input, eps=1e-8): - """Waveform to spectrogram. - - Args: - input: (batch_size, channels_num, segment_samples) - - Outputs: - output: (batch_size, channels_num, time_steps, freq_bins) - """ - sp_list = [] - cos_list = [] - sin_list = [] - channels_num = input.shape[1] - for channel in range(channels_num): - mag, cos, sin = self.spectrogram_phase(input[:, channel, :], eps=eps) - sp_list.append(mag) - cos_list.append(cos) - sin_list.append(sin) - - sps = torch.cat(sp_list, dim=1) - coss = torch.cat(cos_list, dim=1) - sins = torch.cat(sin_list, dim=1) - return sps, coss, sins - - def spectrogram_phase_to_wav(self, sps, coss, sins, length): - channels_num = sps.size()[1] - res = [] - for i in range(channels_num): - res.append( - self.istft( - sps[:, i : i + 1, ...] * coss[:, i : i + 1, ...], - sps[:, i : i + 1, ...] * sins[:, i : i + 1, ...], - length, - ) - ) - res[-1] = res[-1].unsqueeze(1) - return torch.cat(res, dim=1) - - def wav_to_spectrogram(self, input, eps=1e-8): - """Waveform to spectrogram. - - Args: - input: (batch_size,channels_num, segment_samples) - - Outputs: - output: (batch_size, channels_num, time_steps, freq_bins) - """ - sp_list = [] - channels_num = input.shape[1] - for channel in range(channels_num): - sp_list.append(self.spectrogram(input[:, channel, :], eps=eps)) - output = torch.cat(sp_list, dim=1) - return output - - def spectrogram_to_wav(self, input, spectrogram, length=None): - """Spectrogram to waveform. 
- Args: - input: (batch_size, segment_samples, channels_num) - spectrogram: (batch_size, channels_num, time_steps, freq_bins) - - Outputs: - output: (batch_size, segment_samples, channels_num) - """ - channels_num = input.shape[1] - wav_list = [] - for channel in range(channels_num): - (real, imag) = self.stft(input[:, channel, :]) - (_, cos, sin) = magphase(real, imag) - wav_list.append( - self.istft( - spectrogram[:, channel : channel + 1, :, :] * cos, - spectrogram[:, channel : channel + 1, :, :] * sin, - length, - ) - ) - - output = torch.stack(wav_list, dim=1) - return output - - # todo the following code is not bug free! - def wav_to_complex_spectrogram(self, input, eps=0.0): - # [batchsize , channels, samples] - # [batchsize, 2[real,imag]*channels, t-steps, f-bins] - res = [] - channels_num = input.shape[1] - for channel in range(channels_num): - res.append(self.complex_spectrogram(input[:, channel, :], eps=eps)) - return torch.cat(res, dim=1) - - def complex_spectrogram_to_wav(self, input, eps=0.0, length=None): - # [batchsize, 2[real,imag]*channels, t-steps, f-bins] - # return [batchsize, channels, samples] - channels = input.size()[1] // 2 - wavs = [] - for i in range(channels): - wavs.append( - self.reverse_complex_spectrogram( - input[:, 2 * i : 2 * i + 2, ...], eps=eps, length=length - ) - ) - wavs[-1] = wavs[-1].unsqueeze(1) - return torch.cat(wavs, dim=1) - - def wav_to_complex_subband_spectrogram(self, input, eps=0.0): - # [batchsize, channels, samples] - # [batchsize, 2[real,imag]*subband*channels, t-steps, f-bins] - subwav = self.qmf.analysis(input) # [batchsize, subband*channels, samples] - subspec = self.wav_to_complex_spectrogram(subwav) - return subspec - - def complex_subband_spectrogram_to_wav(self, input, eps=0.0): - # [batchsize, 2[real,imag]*subband*channels, t-steps, f-bins] - # [batchsize, channels, samples] - subwav = self.complex_spectrogram_to_wav(input) - data = self.qmf.synthesis(subwav) - return data - - def wav_to_mag_phase_subband_spectrogram(self, input, eps=1e-8): - """ - :param input: - :param eps: - :return: - loss = torch.nn.L1Loss() - models = FDomainHelper(subband=4) - data = torch.randn((3,1, 44100*3)) - - sps, coss, sins = models.wav_to_mag_phase_subband_spectrogram(data) - wav = models.mag_phase_subband_spectrogram_to_wav(sps,coss,sins,44100*3//4) - - print(loss(data,wav)) - print(torch.max(torch.abs(data-wav))) - - """ - # [batchsize, channels, samples] - # [batchsize, 2[real,imag]*subband*channels, t-steps, f-bins] - subwav = self.qmf.analysis(input) # [batchsize, subband*channels, samples] - sps, coss, sins = self.wav_to_spectrogram_phase(subwav, eps=eps) - return sps, coss, sins - - def mag_phase_subband_spectrogram_to_wav(self, sps, coss, sins, length, eps=0.0): - # [batchsize, 2[real,imag]*subband*channels, t-steps, f-bins] - # [batchsize, channels, samples] - subwav = self.spectrogram_phase_to_wav( - sps, coss, sins, length + self.qmf.pad_samples // self.qmf.N - ) - data = self.qmf.synthesis(subwav) - return data diff --git a/spaces/jmartinezot/find_plane_pointcloud/app.py b/spaces/jmartinezot/find_plane_pointcloud/app.py deleted file mode 100644 index 9adfcef67f3cf72bdde5eeef8434af7827c9708c..0000000000000000000000000000000000000000 --- a/spaces/jmartinezot/find_plane_pointcloud/app.py +++ /dev/null @@ -1,99 +0,0 @@ -import gradio as gr -import open3d as o3d -import numpy as np -import pandas as pd -from PIL import Image -from gradio.components import File, Slider, Image, Dataframe, Model3D -import tempfile - -def 
point_cloud_to_image(point_cloud): - # Create Open3D visualization object - vis = o3d.visualization.Visualizer() - vis.create_window() - vis.add_geometry(point_cloud) - - # Render the visualization and get the image - vis.poll_events() - vis.update_renderer() - img = vis.capture_screen_float_buffer() - - # Convert image to PIL Image format - pil_img = Image.fromarray(np.uint8(np.asarray(img)*255)) - - return pil_img - -def create_file_mesh_from_pcd(pcd, alpha): - - temp_file = tempfile.NamedTemporaryFile(suffix=".obj", delete=False) - file_path = temp_file.name - temp_file.close() - - # print(f"alpha={alpha:.3f}") - mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(pcd, alpha) - mesh.compute_vertex_normals() - o3d.io.write_triangle_mesh(file_path, mesh, write_triangle_uvs=True) - return file_path - -def plane_detection(pointcloud_path, voxel_size=0.05, distance_threshold=0.01, num_iterations=1000, alpha=2): - # Load point cloud from file - pcd = o3d.io.read_point_cloud(pointcloud_path.name) - - # Downsample the point cloud to reduce computational load - pcd = pcd.voxel_down_sample(voxel_size=voxel_size) - - # Find plane using RANSAC algorithm - plane_model, inliers = pcd.segment_plane(distance_threshold=distance_threshold, - ransac_n=3, - num_iterations=num_iterations) - - # Get inlier and outlier point cloud - inlier_cloud = pcd.select_by_index(inliers) - outlier_cloud = pcd.select_by_index(inliers, invert=True) - # extract the coefficients of the plane - a, b, c, d = plane_model - plane_model = np.array([[a], [b], [c], [d]]) - df = pd.DataFrame(plane_model.reshape(1, 4), columns=["a", "b", "c", "d"]) - input_path = create_file_mesh_from_pcd(pcd, alpha) - inlier_path = create_file_mesh_from_pcd(inlier_cloud, alpha) - outlier_path = create_file_mesh_from_pcd(outlier_cloud, alpha) - # Return inlier point cloud, outlier point cloud, and plane model - # return point_cloud_to_image(inlier_cloud), point_cloud_to_image(outlier_cloud), df - return input_path, inlier_path, outlier_path, df - -outputs = [ - # show pcd inlier point cloud - Model3D(label="Input Cloud", clear_color=[1.0, 1.0, 1.0, 1.0]), - # show pcd inlier point cloud - Model3D(label="Inlier Cloud", clear_color=[1.0, 1.0, 1.0, 1.0]), - # show pcd outlier point cloud - Model3D(label="Outlier Cloud", clear_color=[1.0, 1.0, 1.0, 1.0]), - # show the centroids and counts, which is a numpy array - Dataframe(label="Coefficients of the plane model", type="pandas") -] - -# Create Gradio interface -iface = gr.Interface(plane_detection, - inputs=[ - File(label="Point cloud file (.ply or .pcd format)"), - Slider(label="Voxel size", minimum=0.001, maximum=50, step=1, value=2), - Slider(label="Distance threshold", minimum=0.001, maximum=50, step=0.01, value=5), - Slider(label="Number of iterations", minimum=1, maximum=10000, step=1, value=100), - Slider(label="Alpha for surface reconstruction", minimum=0.02, maximum=100, step=0.01, value=2), - ], - outputs=outputs, - title="Plane Detection using RANSAC", - description="This app takes as input a point cloud file (.ply or .pcd format), voxel size, distance threshold, and number of iterations, finds a plane using RANSAC algorithm, displays the inlier and outlier point clouds, and returns the inlier point cloud, outlier point cloud, and the plane model.", - allow_flagging="never", - examples=[ - # ["Pointclouds/p1.ply", 2, 5, 100, 2] - ["Pointclouds/cloud_bin_0.ply", 0.001, 0.03, 100, 0.05], - ["Pointclouds/cloud_bin_20.ply", 0.001, 0.03, 100, 0.05], - 
["Pointclouds/cloud_bin_30.ply", 0.001, 0.03, 100, 0.05], - ["Pointclouds/cloud_bin_40.ply", 0.001, 0.03, 100, 0.05], - ["Pointclouds/cloud_bin_50.ply", 0.001, 0.03, 100, 0.05], - ], - ) - - -# Launch the interface -iface.launch() diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_middlewares.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_middlewares.py deleted file mode 100644 index fabcc449a2107211fd99cd59f576a2d855d0e042..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_middlewares.py +++ /dev/null @@ -1,119 +0,0 @@ -import re -from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar - -from .typedefs import Handler -from .web_exceptions import HTTPPermanentRedirect, _HTTPMove -from .web_request import Request -from .web_response import StreamResponse -from .web_urldispatcher import SystemRoute - -__all__ = ( - "middleware", - "normalize_path_middleware", -) - -if TYPE_CHECKING: # pragma: no cover - from .web_app import Application - -_Func = TypeVar("_Func") - - -async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]: - alt_request = request.clone(rel_url=path) - - match_info = await request.app.router.resolve(alt_request) - alt_request._match_info = match_info - - if match_info.http_exception is None: - return True, alt_request - - return False, request - - -def middleware(f: _Func) -> _Func: - f.__middleware_version__ = 1 # type: ignore[attr-defined] - return f - - -_Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]] - - -def normalize_path_middleware( - *, - append_slash: bool = True, - remove_slash: bool = False, - merge_slashes: bool = True, - redirect_class: Type[_HTTPMove] = HTTPPermanentRedirect, -) -> _Middleware: - """Factory for producing a middleware that normalizes the path of a request. - - Normalizing means: - - Add or remove a trailing slash to the path. - - Double slashes are replaced by one. - - The middleware returns as soon as it finds a path that resolves - correctly. The order if both merge and append/remove are enabled is - 1) merge slashes - 2) append/remove slash - 3) both merge slashes and append/remove slash. - If the path resolves with at least one of those conditions, it will - redirect to the new path. - - Only one of `append_slash` and `remove_slash` can be enabled. If both - are `True` the factory will raise an assertion error - - If `append_slash` is `True` the middleware will append a slash when - needed. If a resource is defined with trailing slash and the request - comes without it, it will append it automatically. - - If `remove_slash` is `True`, `append_slash` must be `False`. When enabled - the middleware will remove trailing slashes and redirect if the resource - is defined - - If merge_slashes is True, merge multiple consecutive slashes in the - path into one. - """ - correct_configuration = not (append_slash and remove_slash) - assert correct_configuration, "Cannot both remove and append slash" - - @middleware - async def impl(request: Request, handler: Handler) -> StreamResponse: - if isinstance(request.match_info.route, SystemRoute): - paths_to_check = [] - if "?" in request.raw_path: - path, query = request.raw_path.split("?", 1) - query = "?" 
+ query - else: - query = "" - path = request.raw_path - - if merge_slashes: - paths_to_check.append(re.sub("//+", "/", path)) - if append_slash and not request.path.endswith("/"): - paths_to_check.append(path + "/") - if remove_slash and request.path.endswith("/"): - paths_to_check.append(path[:-1]) - if merge_slashes and append_slash: - paths_to_check.append(re.sub("//+", "/", path + "/")) - if merge_slashes and remove_slash: - merged_slashes = re.sub("//+", "/", path) - paths_to_check.append(merged_slashes[:-1]) - - for path in paths_to_check: - path = re.sub("^//+", "/", path) # SECURITY: GHSA-v6wp-4m6f-gcjg - resolves, request = await _check_request_resolves(request, path) - if resolves: - raise redirect_class(request.raw_path + query) - - return await handler(request) - - return impl - - -def _fix_request_current_app(app: "Application") -> _Middleware: - @middleware - async def impl(request: Request, handler: Handler) -> StreamResponse: - with request.match_info.set_current_app(app): - return await handler(request) - - return impl diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/google_readers/gdocs.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/google_readers/gdocs.py deleted file mode 100644 index e7134877a0cf0d998609532b9819218d50eaec5e..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/google_readers/gdocs.py +++ /dev/null @@ -1,157 +0,0 @@ -"""Google docs reader.""" - -import logging -import os -from typing import Any, List - -from gpt_index.readers.base import BaseReader -from gpt_index.readers.schema.base import Document - -SCOPES = ["https://www.googleapis.com/auth/documents.readonly"] - - -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class GoogleDocsReader(BaseReader): - """Google Docs reader. - - Reads a page from Google Docs - - """ - - def __init__(self) -> None: - """Initialize with parameters.""" - try: - import google # noqa: F401 - import google_auth_oauthlib # noqa: F401 - import googleapiclient # noqa: F401 - except ImportError: - raise ImportError( - "`google_auth_oauthlib`, `googleapiclient` and `google` " - "must be installed to use the GoogleDocsReader.\n" - "Please run `pip install --upgrade google-api-python-client " - "google-auth-httplib2 google-auth-oauthlib`." - ) - - def load_data(self, document_ids: List[str]) -> List[Document]: - """Load data from the input directory. - - Args: - document_ids (List[str]): a list of document ids. - """ - if document_ids is None: - raise ValueError('Must specify a "document_ids" in `load_kwargs`.') - - results = [] - for document_id in document_ids: - doc = self._load_doc(document_id) - results.append(Document(doc, extra_info={"document_id": document_id})) - return results - - def _load_doc(self, document_id: str) -> str: - """Load a document from Google Docs. - - Args: - document_id: the document id. 
- - Returns: - The document text. - """ - import googleapiclient.discovery as discovery - - credentials = self._get_credentials() - docs_service = discovery.build("docs", "v1", credentials=credentials) - doc = docs_service.documents().get(documentId=document_id).execute() - doc_content = doc.get("body").get("content") - return self._read_structural_elements(doc_content) - - def _get_credentials(self) -> Any: - """Get valid user credentials from storage. - - The file token.json stores the user's access and refresh tokens, and is - created automatically when the authorization flow completes for the first - time. - - Returns: - Credentials, the obtained credential. - """ - from google.auth.transport.requests import Request - from google.oauth2.credentials import Credentials - from google_auth_oauthlib.flow import InstalledAppFlow - - creds = None - if os.path.exists("token.json"): - creds = Credentials.from_authorized_user_file("token.json", SCOPES) - # If there are no (valid) credentials available, let the user log in. - if not creds or not creds.valid: - if creds and creds.expired and creds.refresh_token: - creds.refresh(Request()) - else: - flow = InstalledAppFlow.from_client_secrets_file( - "credentials.json", SCOPES - ) - creds = flow.run_local_server(port=0) - # Save the credentials for the next run - with open("token.json", "w") as token: - token.write(creds.to_json()) - - return creds - - def _read_paragraph_element(self, element: Any) -> Any: - """Return the text in the given ParagraphElement. - - Args: - element: a ParagraphElement from a Google Doc. - """ - text_run = element.get("textRun") - if not text_run: - return "" - return text_run.get("content") - - def _read_structural_elements(self, elements: List[Any]) -> Any: - """Recurse through a list of Structural Elements. - - Read a document's text where text may be in nested elements. - - Args: - elements: a list of Structural Elements. - """ - text = "" - for value in elements: - if "paragraph" in value: - elements = value.get("paragraph").get("elements") - for elem in elements: - text += self._read_paragraph_element(elem) - elif "table" in value: - # The text in table cells are in nested Structural Elements - # and tables may be nested. - table = value.get("table") - for row in table.get("tableRows"): - cells = row.get("tableCells") - for cell in cells: - text += self._read_structural_elements(cell.get("content")) - elif "tableOfContents" in value: - # The text in the TOC is also in a Structural Element. - toc = value.get("tableOfContents") - text += self._read_structural_elements(toc.get("content")) - return text - - -if __name__ == "__main__": - reader = GoogleDocsReader() - logging.info( - reader.load_data(document_ids=["11ctUj_tEf5S8vs_dk8_BNi-Zk8wW5YFhXkKqtmU_4B8"]) - ) diff --git a/spaces/johnyang/ChatPaper111/utils.py b/spaces/johnyang/ChatPaper111/utils.py deleted file mode 100644 index 4e9778be8d941eb34d6cd63d964b9b4798a71722..0000000000000000000000000000000000000000 --- a/spaces/johnyang/ChatPaper111/utils.py +++ /dev/null @@ -1,24 +0,0 @@ - -from typing import Set - - -def get_filtered_keys_from_object(obj: object, *keys: str) -> Set[str]: - """ - Get filtered list of object variable names. - :param keys: List of keys to include. If the first key is "not", the remaining keys will be removed from the class keys. - :return: List of class keys. - """ - class_keys = obj.__dict__.keys() - if not keys: - return class_keys - - # Remove the passed keys from the class keys. 
- if keys[0] == "not": - return {key for key in class_keys if key not in keys[1:]} - # Check if all passed keys are valid - if invalid_keys := set(keys) - class_keys: - raise ValueError( - f"Invalid keys: {invalid_keys}", - ) - # Only return specified keys that are in class_keys - return {key for key in keys if key in class_keys} diff --git a/spaces/jonathang/dob_breed/app.py b/spaces/jonathang/dob_breed/app.py deleted file mode 100644 index 65238f09ac7532a98829cef1ce6f3d13d5a5d875..0000000000000000000000000000000000000000 --- a/spaces/jonathang/dob_breed/app.py +++ /dev/null @@ -1,26 +0,0 @@ -import gradio as gr -from fastai.vision.all import * -import skimage - -learn = load_learner('dog_breed.pkl') - -labels = learn.dls.vocab -def predict(img): - img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - -title = "Doge Breed Classifier" -description = "A dog breed classifier trained on duckduckgo images with fastai." -interpretation='default' -enable_queue=True - -gr.Interface( - fn=predict, - inputs=gr.inputs.Image(shape=(512, 512)), - outputs=gr.outputs.Label(num_top_classes=3), - title=title, - description=description, - interpretation=interpretation, - enable_queue=enable_queue -).launch() \ No newline at end of file diff --git a/spaces/jordonpeter01/MusicGen2/audiocraft/modules/conditioners.py b/spaces/jordonpeter01/MusicGen2/audiocraft/modules/conditioners.py deleted file mode 100644 index 82792316024b88d4c5c38b0a28f443627771d509..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen2/audiocraft/modules/conditioners.py +++ /dev/null @@ -1,990 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from collections import defaultdict -from copy import deepcopy -from dataclasses import dataclass, field -from itertools import chain -import logging -import math -import random -import re -import typing as tp -import warnings - -from einops import rearrange -from num2words import num2words -import spacy -from transformers import T5EncoderModel, T5Tokenizer # type: ignore -import torchaudio -import torch -from torch import nn -from torch import Tensor -import torch.nn.functional as F -from torch.nn.utils.rnn import pad_sequence - -from .streaming import StreamingModule -from .transformer import create_sin_embedding -from ..data.audio_dataset import SegmentInfo -from ..utils.autocast import TorchAutocast -from ..utils.utils import hash_trick, length_to_mask, collate - - -logger = logging.getLogger(__name__) -TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) -ConditionType = tp.Tuple[Tensor, Tensor] # condition, mask - - -class WavCondition(tp.NamedTuple): - wav: Tensor - length: Tensor - path: tp.List[tp.Optional[str]] = [] - - -def nullify_condition(condition: ConditionType, dim: int = 1): - """This function transforms an input condition to a null condition. - The way it is done by converting it to a single zero vector similarly - to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. - - Args: - condition (ConditionType): a tuple of condition and mask (tp.Tuple[Tensor, Tensor]) - dim (int): the dimension that will be truncated (should be the time dimension) - WARNING!: dim should not be the batch dimension! 
- Returns: - ConditionType: a tuple of null condition and mask - """ - assert dim != 0, "dim cannot be the batch dimension!" - assert type(condition) == tuple and \ - type(condition[0]) == Tensor and \ - type(condition[1]) == Tensor, "'nullify_condition' got an unexpected input type!" - cond, mask = condition - B = cond.shape[0] - last_dim = cond.dim() - 1 - out = cond.transpose(dim, last_dim) - out = 0. * out[..., :1] - out = out.transpose(dim, last_dim) - mask = torch.zeros((B, 1), device=out.device).int() - assert cond.dim() == out.dim() - return out, mask - - -def nullify_wav(wav: Tensor) -> WavCondition: - """Create a nullified WavCondition from a wav tensor with appropriate shape. - - Args: - wav (Tensor): tensor of shape [B, T] - Returns: - WavCondition: wav condition with nullified wav. - """ - null_wav, _ = nullify_condition((wav, torch.zeros_like(wav)), dim=wav.dim() - 1) - return WavCondition( - wav=null_wav, - length=torch.tensor([0] * wav.shape[0], device=wav.device), - path=['null_wav'] * wav.shape[0] - ) - - -@dataclass -class ConditioningAttributes: - text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) - wav: tp.Dict[str, WavCondition] = field(default_factory=dict) - - def __getitem__(self, item): - return getattr(self, item) - - @property - def text_attributes(self): - return self.text.keys() - - @property - def wav_attributes(self): - return self.wav.keys() - - @property - def attributes(self): - return {"text": self.text_attributes, "wav": self.wav_attributes} - - def to_flat_dict(self): - return { - **{f"text.{k}": v for k, v in self.text.items()}, - **{f"wav.{k}": v for k, v in self.wav.items()}, - } - - @classmethod - def from_flat_dict(cls, x): - out = cls() - for k, v in x.items(): - kind, att = k.split(".") - out[kind][att] = v - return out - - -class SegmentWithAttributes(SegmentInfo): - """Base class for all dataclasses that are used for conditioning. - All child classes should implement `to_condition_attributes` that converts - the existing attributes to a dataclass of type ConditioningAttributes. - """ - def to_condition_attributes(self) -> ConditioningAttributes: - raise NotImplementedError() - - -class Tokenizer: - """Base class for all tokenizers - (in case we want to introduce more advances tokenizers in the future). - """ - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: - raise NotImplementedError() - - -class WhiteSpaceTokenizer(Tokenizer): - """This tokenizer should be used for natural language descriptions. - For example: - ["he didn't, know he's going home.", 'shorter sentence'] => - [[78, 62, 31, 4, 78, 25, 19, 34], - [59, 77, 0, 0, 0, 0, 0, 0]] - """ - PUNCTUATIONS = "?:!.,;" - - def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", - lemma: bool = True, stopwords: bool = True) -> None: - self.n_bins = n_bins - self.pad_idx = pad_idx - self.lemma = lemma - self.stopwords = stopwords - try: - self.nlp = spacy.load(language) - except IOError: - spacy.cli.download(language) # type: ignore - self.nlp = spacy.load(language) - - @tp.no_type_check - def __call__( - self, - texts: tp.List[tp.Optional[str]], - return_text: bool = False - ) -> tp.Tuple[Tensor, Tensor]: - """Take a list of strings and convert them to a tensor of indices. - - Args: - texts (tp.List[str]): List of strings. - return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. - Returns: - tp.Tuple[Tensor, Tensor]: - - Indices of words in the LUT. 
- - And a mask indicating where the padding tokens are - """ - output, lengths = [], [] - texts = deepcopy(texts) - for i, text in enumerate(texts): - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(Tensor([self.pad_idx])) - lengths.append(0) - continue - - # convert numbers to words - text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore - # normalize text - text = self.nlp(text) # type: ignore - # remove stopwords - if self.stopwords: - text = [w for w in text if not w.is_stop] # type: ignore - # remove punctuations - text = [w for w in text if w.text not in self.PUNCTUATIONS] # type: ignore - # lemmatize if needed - text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore - - texts[i] = " ".join(text) - lengths.append(len(text)) - # convert to tensor - tokens = Tensor([hash_trick(w, self.n_bins) for w in text]) - output.append(tokens) - - mask = length_to_mask(torch.IntTensor(lengths)).int() - padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() - if return_text: - return padded_output, mask, texts # type: ignore - return padded_output, mask - - -class NoopTokenizer(Tokenizer): - """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. - The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split - strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will - split it to ["Jeff", "Buckley"] and return an index per word. - - For example: - ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] - ["Metal", "Rock", "Classical"] => [0, 223, 51] - """ - def __init__(self, n_bins: int, pad_idx: int = 0): - self.n_bins = n_bins - self.pad_idx = pad_idx - - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: - output, lengths = [], [] - for text in texts: - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(self.pad_idx) - lengths.append(0) - else: - output.append(hash_trick(text, self.n_bins)) - lengths.append(1) - - tokens = torch.LongTensor(output).unsqueeze(1) - mask = length_to_mask(torch.IntTensor(lengths)).int() - return tokens, mask - - -class BaseConditioner(nn.Module): - """Base model for all conditioner modules. We allow the output dim to be different - than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; - 2) make all condition dims consistent. - - Args: - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - """ - def __init__(self, dim, output_dim): - super().__init__() - self.dim = dim - self.output_dim = output_dim - self.output_proj = nn.Linear(dim, output_dim) - - def tokenize(self, *args, **kwargs) -> tp.Any: - """Should be any part of the processing that will lead to a synchronization - point, e.g. BPE tokenization with transfer to the GPU. - - The returned value will be saved and return later when calling forward(). - """ - raise NotImplementedError() - - def forward(self, inputs: tp.Any) -> ConditionType: - """Gets input that should be used as conditioning (e.g, genre, description or a waveform). - Outputs a ConditionType, after the input data was embedded as a dense vector. - - Returns: - ConditionType: - - A tensor of size [B, T, D] where B is the batch size, T is the length of the - output embedding and D is the dimension of the embedding. 
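The NoopTokenizer above relies on audiocraft's hash_trick and length_to_mask helpers; the self-contained approximation below shows the same idea with a plain hash bucket, where each full string maps to a single index and None becomes the pad index with a zeroed mask entry. Names here are illustrative stand-ins.

import hashlib
import torch

def hash_bucket(text: str, n_bins: int) -> int:
    # stable stand-in for hash_trick: bucket a whole string into n_bins
    return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % n_bins

def noop_tokenize(texts, n_bins=128, pad_idx=0):
    ids = [pad_idx if t is None else hash_bucket(t, n_bins) for t in texts]
    lengths = [0 if t is None else 1 for t in texts]
    tokens = torch.tensor(ids, dtype=torch.long).unsqueeze(1)   # [B, 1]
    mask = torch.tensor(lengths, dtype=torch.int).unsqueeze(1)  # [B, 1], 1 marks a real token
    return tokens, mask

# noop_tokenize(["Queen", None, "Jeff Buckley"]) -> one index per full string, mask [[1], [0], [1]]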
- - And a mask indicating where the padding tokens. - """ - raise NotImplementedError() - - -class TextConditioner(BaseConditioner): - ... - - -class LUTConditioner(TextConditioner): - """Lookup table TextConditioner. - - Args: - n_bins (int): Number of bins. - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - tokenizer (str): Name of the tokenizer. - pad_idx (int, optional): Index for padding token. Defaults to 0. - """ - def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): - super().__init__(dim, output_dim) - self.embed = nn.Embedding(n_bins, dim) - self.tokenizer: Tokenizer - if tokenizer == "whitespace": - self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) - elif tokenizer == "noop": - self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) - else: - raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - device = self.embed.weight.device - tokens, mask = self.tokenizer(x) - tokens, mask = tokens.to(device), mask.to(device) - return tokens, mask - - def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: - tokens, mask = inputs - embeds = self.embed(tokens) - embeds = self.output_proj(embeds) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class T5Conditioner(TextConditioner): - """T5-based TextConditioner. - - Args: - name (str): Name of the T5 model. - output_dim (int): Output dim of the conditioner. - finetune (bool): Whether to fine-tune T5 at train time. - device (str): Device for T5 Conditioner. - autocast_dtype (tp.Optional[str], optional): Autocast dtype. - word_dropout (float, optional): Word dropout probability. - normalize_text (bool, optional): Whether to apply text normalization. - """ - MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", - "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", - "google/flan-t5-xl", "google/flan-t5-xxl"] - MODELS_DIMS = { - "t5-small": 512, - "t5-base": 768, - "t5-large": 1024, - "t5-3b": 1024, - "t5-11b": 1024, - "google/flan-t5-small": 512, - "google/flan-t5-base": 768, - "google/flan-t5-large": 1024, - "google/flan-t5-3b": 1024, - "google/flan-t5-11b": 1024, - } - - def __init__(self, name: str, output_dim: int, finetune: bool, device: str, - autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., - normalize_text: bool = False): - assert name in self.MODELS, f"unrecognized t5 model name (should in {self.MODELS})" - super().__init__(self.MODELS_DIMS[name], output_dim) - self.device = device - self.name = name - self.finetune = finetune - self.word_dropout = word_dropout - - if autocast_dtype is None or self.device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - if self.device != 'cpu': - logger.warning("T5 has no autocast, this might lead to NaN") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
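A minimal sketch of the lookup-table conditioning path described above: tokens are embedded, projected to the shared conditioning dimension, and padded positions are zeroed with the mask. Shapes follow the [B, T, D] convention used throughout this file; the sizes below are arbitrary.

import torch
from torch import nn

n_bins, dim, output_dim = 128, 32, 64
embed = nn.Embedding(n_bins, dim)
output_proj = nn.Linear(dim, output_dim)

tokens = torch.randint(0, n_bins, (2, 5))                # [B, T]
mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])  # [B, T]
embeds = output_proj(embed(tokens))                      # [B, T, output_dim]
embeds = embeds * mask.unsqueeze(-1)                     # padded steps become exact zeros
assert embeds.shape == (2, 5, output_dim)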
- # thanks https://gist.github.com/simon-weber/7853144 - previous_level = logging.root.manager.disable - logging.disable(logging.ERROR) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - try: - self.t5_tokenizer = T5Tokenizer.from_pretrained(name) - t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) - finally: - logging.disable(previous_level) - if finetune: - self.t5 = t5 - else: - # this makes sure that the t5 models is not part - # of the saved checkpoint - self.__dict__["t5"] = t5.to(device) - - self.normalize_text = normalize_text - if normalize_text: - self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: - # if current sample doesn't have a certain attribute, replace with empty string - entries: tp.List[str] = [xi if xi is not None else "" for xi in x] - if self.normalize_text: - _, _, entries = self.text_normalizer(entries, return_text=True) - if self.word_dropout > 0. and self.training: - new_entries = [] - for entry in entries: - words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] - new_entries.append(" ".join(words)) - entries = new_entries - - empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) - - inputs = self.t5_tokenizer(entries, return_tensors="pt", padding=True).to(self.device) - mask = inputs["attention_mask"] - mask[empty_idx, :] = 0 # zero-out index where the input is non-existant - return inputs - - def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: - mask = inputs["attention_mask"] - with torch.set_grad_enabled(self.finetune), self.autocast: - embeds = self.t5(**inputs).last_hidden_state - embeds = self.output_proj(embeds.to(self.output_proj.weight)) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class WaveformConditioner(BaseConditioner): - """Base class for all conditioners that take a waveform as input. - Classes that inherit must implement `_get_wav_embedding` that outputs - a continuous tensor, and `_downsampling_factor` that returns the down-sampling - factor of the embedding model. - - Args: - dim (int): The internal representation dimension. - output_dim (int): Output dimension. - device (tp.Union[torch.device, str]): Device. - """ - def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): - super().__init__(dim, output_dim) - self.device = device - - def tokenize(self, wav_length: WavCondition) -> WavCondition: - wav, length, path = wav_length - assert length is not None - return WavCondition(wav.to(self.device), length.to(self.device), path) - - def _get_wav_embedding(self, wav: Tensor) -> Tensor: - """Gets as input a wav and returns a dense vector of conditions.""" - raise NotImplementedError() - - def _downsampling_factor(self): - """Returns the downsampling factor of the embedding model.""" - raise NotImplementedError() - - def forward(self, inputs: WavCondition) -> ConditionType: - """ - Args: - input (WavCondition): Tuple of (waveform, lengths). - Returns: - ConditionType: Dense vector representing the conditioning along with its' mask. 
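The word-dropout step in T5Conditioner.tokenize can be illustrated in isolation: during training, each whitespace-separated word is independently dropped with probability word_dropout before the text reaches the T5 tokenizer. The sketch below uses a seeded random.Random purely for reproducibility; the original uses the global random module.

import random

def apply_word_dropout(entries, word_dropout=0.3, training=True, seed=0):
    rng = random.Random(seed)
    if word_dropout <= 0.0 or not training:
        return entries
    dropped = []
    for entry in entries:
        words = [w for w in entry.split(" ") if rng.random() >= word_dropout]
        dropped.append(" ".join(words))
    return dropped

# apply_word_dropout(["a calm piano piece with soft drums"]) might return
# ["a calm piece soft drums"], depending on the draws.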
- """ - wav, lengths, path = inputs - with torch.no_grad(): - embeds = self._get_wav_embedding(wav) - embeds = embeds.to(self.output_proj.weight) - embeds = self.output_proj(embeds) - - if lengths is not None: - lengths = lengths / self._downsampling_factor() - mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore - else: - mask = torch.ones_like(embeds) - embeds = (embeds * mask.unsqueeze(2).to(self.device)) - - return embeds, mask - - -class ChromaStemConditioner(WaveformConditioner): - """Chroma conditioner that uses DEMUCS to first filter out drums and bass. The is followed by - the insight the drums and bass often dominate the chroma, leading to the chroma not containing the - information about melody. - - Args: - output_dim (int): Output dimension for the conditioner. - sample_rate (int): Sample rate for the chroma extractor. - n_chroma (int): Number of chroma for the chroma extractor. - radix2_exp (int): Radix2 exponent for the chroma extractor. - duration (float): Duration used during training. This is later used for correct padding - in case we are using chroma as prefix. - match_len_on_eval (bool, optional): If True then all chromas are padded to the training - duration. Defaults to False. - eval_wavs (str, optional): Path to a json egg with waveform, this waveforms are used as - conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). - Defaults to None. - n_eval_wavs (int, optional): Limits the number of waveforms used for conditioning. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for the conditioner. - **kwargs: Additional parameters for the chroma extractor. - """ - def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, - duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, - n_eval_wavs: int = 0, device: tp.Union[torch.device, str] = "cpu", **kwargs): - from demucs import pretrained - super().__init__(dim=n_chroma, output_dim=output_dim, device=device) - self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) - self.sample_rate = sample_rate - self.match_len_on_eval = match_len_on_eval - self.duration = duration - self.__dict__["demucs"] = pretrained.get_model('htdemucs').to(device) - self.stem2idx = {'drums': 0, 'bass': 1, 'other': 2, 'vocal': 3} - self.stem_idx = torch.LongTensor([self.stem2idx['vocal'], self.stem2idx['other']]).to(device) - self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, - device=device, **kwargs) - self.chroma_len = self._get_chroma_len() - - def _downsampling_factor(self): - return self.chroma.winhop - - def _get_chroma_len(self): - """Get length of chroma during training""" - dummy_wav = torch.zeros((1, self.sample_rate * self.duration), device=self.device) - dummy_chr = self.chroma(dummy_wav) - return dummy_chr.shape[1] - - @torch.no_grad() - def _get_filtered_wav(self, wav): - from demucs.apply import apply_model - from demucs.audio import convert_audio - with self.autocast: - wav = convert_audio(wav, self.sample_rate, self.demucs.samplerate, self.demucs.audio_channels) - stems = apply_model(self.demucs, wav, device=self.device) - stems = stems[:, self.stem_idx] # extract stem - stems = stems.sum(1) # merge extracted stems - stems = stems.mean(1, keepdim=True) # mono - stems = convert_audio(stems, self.demucs.samplerate, self.sample_rate, 1) - return stems - - @torch.no_grad() - def _get_wav_embedding(self, 
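To make the masking in WaveformConditioner.forward concrete: per-item waveform lengths are divided by the embedding model's downsampling factor and turned into a frame-level mask. The helper below is a stand-in for audiocraft's length_to_mask; the numbers are illustrative.

import torch

def length_to_mask(lengths, max_len):
    return (torch.arange(max_len).unsqueeze(0) < lengths.unsqueeze(1)).int()

lengths = torch.tensor([32000, 16000])      # samples per batch item
downsampling_factor = 640                   # e.g. the chroma extractor's hop size
frames = lengths / downsampling_factor      # 50 and 25 frames
mask = length_to_mask(frames, max_len=50)   # [2, 50]
assert mask.shape == (2, 50) and int(mask[1].sum()) == 25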
wav): - # avoid 0-size tensors when we are working with null conds - if wav.shape[-1] == 1: - return self.chroma(wav) - stems = self._get_filtered_wav(wav) - chroma = self.chroma(stems) - - if self.match_len_on_eval: - b, t, c = chroma.shape - if t > self.chroma_len: - chroma = chroma[:, :self.chroma_len] - logger.debug(f'chroma was truncated! ({t} -> {chroma.shape[1]})') - elif t < self.chroma_len: - # chroma = F.pad(chroma, (0, 0, 0, self.chroma_len - t)) - n_repeat = int(math.ceil(self.chroma_len / t)) - chroma = chroma.repeat(1, n_repeat, 1) - chroma = chroma[:, :self.chroma_len] - logger.debug(f'chroma was zero-padded! ({t} -> {chroma.shape[1]})') - return chroma - - -class ChromaExtractor(nn.Module): - """Chroma extraction class, handles chroma extraction and quantization. - - Args: - sample_rate (int): Sample rate. - n_chroma (int): Number of chroma to consider. - radix2_exp (int): Radix2 exponent. - nfft (tp.Optional[int], optional): Number of FFT. - winlen (tp.Optional[int], optional): Window length. - winhop (tp.Optional[int], optional): Window hop size. - argmax (bool, optional): Whether to use argmax. Defaults to False. - norm (float, optional): Norm for chroma normalization. Defaults to inf. - device (tp.Union[torch.device, str], optional): Device to use. Defaults to cpu. - """ - def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, - nfft: tp.Optional[int] = None, winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, - argmax: bool = False, norm: float = torch.inf, device: tp.Union[torch.device, str] = "cpu"): - super().__init__() - from librosa import filters - self.device = device - self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) - self.winlen = winlen or 2 ** radix2_exp - self.nfft = nfft or self.winlen - self.winhop = winhop or (self.winlen // 4) - self.sr = sample_rate - self.n_chroma = n_chroma - self.norm = norm - self.argmax = argmax - self.window = torch.hann_window(self.winlen).to(device) - self.fbanks = torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0, - n_chroma=self.n_chroma)).to(device) - self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen, - hop_length=self.winhop, power=2, center=True, - pad=0, normalized=True).to(device) - - def forward(self, wav): - with self.autocast: - T = wav.shape[-1] - # in case we are getting a wav that was dropped out (nullified) - # make sure wav length is no less that nfft - if T < self.nfft: - pad = self.nfft - T - r = 0 if pad % 2 == 0 else 1 - wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0) - assert wav.shape[-1] == self.nfft, f'expected len {self.nfft} but got {wav.shape[-1]}' - spec = self.spec(wav).squeeze(1) - raw_chroma = torch.einsum("cf,...ft->...ct", self.fbanks, spec) - norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6) - norm_chroma = rearrange(norm_chroma, "b d t -> b t d") - - if self.argmax: - idx = norm_chroma.argmax(-1, keepdims=True) - norm_chroma[:] = 0 - norm_chroma.scatter_(dim=-1, index=idx, value=1) - - return norm_chroma - - -def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str): - """Utility function for nullifying an attribute inside an ConditioningAttributes object. - If the condition is of type "wav", then nullify it using "nullify_condition". - If the condition is of any other type, set its' value to None. - Works in-place. 
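The match_len_on_eval behaviour above reduces to a small, self-contained rule: chroma longer than the training length is truncated, shorter chroma is tiled along time and then cut, so the conditioning always has a fixed number of frames. A sketch:

import math
import torch

def match_chroma_len(chroma: torch.Tensor, target_len: int) -> torch.Tensor:
    b, t, c = chroma.shape
    if t > target_len:
        return chroma[:, :target_len]
    if t < target_len:
        n_repeat = int(math.ceil(target_len / t))
        chroma = chroma.repeat(1, n_repeat, 1)   # tile along the time axis
        return chroma[:, :target_len]
    return chroma

assert match_chroma_len(torch.randn(1, 30, 12), target_len=100).shape == (1, 100, 12)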
- """ - if condition_type not in ["text", "wav"]: - raise ValueError( - "dropout_condition got an unexpected condition type!" - f" expected 'wav' or 'text' but got '{condition_type}'" - ) - - if condition not in getattr(sample, condition_type): - raise ValueError( - "dropout_condition received an unexpected condition!" - f" expected wav={sample.wav.keys()} and text={sample.text.keys()}" - f"but got '{condition}' of type '{condition_type}'!" - ) - - if condition_type == "wav": - wav, length, path = sample.wav[condition] - sample.wav[condition] = nullify_wav(wav) - else: - sample.text[condition] = None - - return sample - - -class DropoutModule(nn.Module): - """Base class for all dropout modules.""" - def __init__(self, seed: int = 1234): - super().__init__() - self.rng = torch.Generator() - self.rng.manual_seed(seed) - - -class AttributeDropout(DropoutModule): - """Applies dropout with a given probability per attribute. This is different from the behavior of - ClassifierFreeGuidanceDropout as this allows for attributes to be dropped out separately. For example, - "artist" can be dropped while "genre" remains. This is in contrast to ClassifierFreeGuidanceDropout - where if "artist" is dropped "genre" must also be dropped. - - Args: - p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example: - ... - "genre": 0.1, - "artist": 0.5, - "wav": 0.25, - ... - active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False. - seed (int, optional): Random seed. - """ - def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234): - super().__init__(seed=seed) - self.active_on_eval = active_on_eval - # construct dict that return the values from p otherwise 0 - self.p = {} - for condition_type, probs in p.items(): - self.p[condition_type] = defaultdict(lambda: 0, probs) - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (tp.List[ConditioningAttributes]): List of conditions. - Returns: - tp.List[ConditioningAttributes]: List of conditions after certain attributes were set to None. - """ - if not self.training and not self.active_on_eval: - return samples - - samples = deepcopy(samples) - - for condition_type, ps in self.p.items(): # for condition types [text, wav] - for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre]) - if torch.rand(1, generator=self.rng).item() < p: - for sample in samples: - dropout_condition(sample, condition_type, condition) - - return samples - - def __repr__(self): - return f"AttributeDropout({dict(self.p)})" - - -class ClassifierFreeGuidanceDropout(DropoutModule): - """Applies Classifier Free Guidance dropout, meaning all attributes - are dropped with the same probability. - - Args: - p (float): Probability to apply condition dropout during training. - seed (int): Random seed. - """ - def __init__(self, p: float, seed: int = 1234): - super().__init__(seed=seed) - self.p = p - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (tp.List[ConditioningAttributes]): List of conditions. - Returns: - tp.List[ConditioningAttributes]: List of conditions after all attributes were set to None. 
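The core of AttributeDropout is one Bernoulli draw per attribute, shared across the whole batch, from a seeded generator, so that for example "artist" can be dropped while "genre" survives. A stripped-down sketch of that decision step (the actual nullification is delegated to dropout_condition above); the probabilities are illustrative.

from collections import defaultdict
import torch

rng = torch.Generator()
rng.manual_seed(1234)
p = {"text": defaultdict(lambda: 0.0, {"artist": 0.5, "genre": 0.1})}

to_drop = []
for condition_type, probs in p.items():
    for condition, prob in probs.items():
        if torch.rand(1, generator=rng).item() < prob:
            to_drop.append((condition_type, condition))
# every sample in the batch then has the attributes listed in to_drop nullified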
- """ - if not self.training: - return samples - - # decide on which attributes to drop in a batched fashion - drop = torch.rand(1, generator=self.rng).item() < self.p - if not drop: - return samples - - # nullify conditions of all attributes - samples = deepcopy(samples) - - for condition_type in ["wav", "text"]: - for sample in samples: - for condition in sample.attributes[condition_type]: - dropout_condition(sample, condition_type, condition) - - return samples - - def __repr__(self): - return f"ClassifierFreeGuidanceDropout(p={self.p})" - - -class ConditioningProvider(nn.Module): - """Main class to provide conditions given all the supported conditioners. - - Args: - conditioners (dict): Dictionary of conditioners. - merge_text_conditions_p (float, optional): Probability to merge all text sources - into a single text condition. Defaults to 0. - drop_desc_p (float, optional): Probability to drop the original description - when merging all text sources into a single text condition. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for conditioners and output condition types. - """ - def __init__( - self, - conditioners: tp.Dict[str, BaseConditioner], - merge_text_conditions_p: float = 0, - drop_desc_p: float = 0, - device: tp.Union[torch.device, str] = "cpu", - ): - super().__init__() - self.device = device - self.merge_text_conditions_p = merge_text_conditions_p - self.drop_desc_p = drop_desc_p - self.conditioners = nn.ModuleDict(conditioners) - - @property - def text_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)] - - @property - def wav_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)] - - @property - def has_wav_condition(self): - return len(self.wav_conditions) > 0 - - def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: - """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. - This should be called before starting any real GPU work to avoid synchronization points. - This will return a dict matching conditioner names to their arbitrary tokenized representations. - - Args: - inputs (list[ConditioningAttribres]): List of ConditioningAttributes objects containing - text and wav conditions. - """ - assert all([type(x) == ConditioningAttributes for x in inputs]), \ - "got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]" \ - f" but types were {set([type(x) for x in inputs])}" - - output = {} - text = self._collate_text(inputs) - wavs = self._collate_wavs(inputs) - - assert set(text.keys() | wavs.keys()).issubset(set(self.conditioners.keys())), \ - f"got an unexpected attribute! Expected {self.conditioners.keys()}, got {text.keys(), wavs.keys()}" - - for attribute, batch in chain(text.items(), wavs.items()): - output[attribute] = self.conditioners[attribute].tokenize(batch) - return output - - def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: - """Compute pairs of `(embedding, mask)` using the configured conditioners - and the tokenized representations. The output is for example: - - { - "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), - "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), - ... - } - - Args: - tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. 
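ClassifierFreeGuidanceDropout is the all-or-nothing counterpart: a single draw per batch decides whether every attribute of every sample is nullified, which is what trains the unconditional branch needed for classifier-free guidance. A minimal sketch on plain dictionaries instead of ConditioningAttributes objects:

from copy import deepcopy
import torch

def cfg_dropout(samples, p, rng):
    if not (torch.rand(1, generator=rng).item() < p):
        return samples
    samples = deepcopy(samples)
    for sample in samples:
        for key in sample:
            sample[key] = None   # the real module calls dropout_condition per attribute
    return samples

rng = torch.Generator()
rng.manual_seed(1234)
batch = [{"genre": "Rock", "description": "a rock song"},
         {"genre": "Jazz", "description": "a jazz ballad"}]
maybe_dropped = cfg_dropout(batch, p=0.1, rng=rng)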
- """ - output = {} - for attribute, inputs in tokenized.items(): - condition, mask = self.conditioners[attribute](inputs) - output[attribute] = (condition, mask) - return output - - def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]: - """Given a list of ConditioningAttributes objects, compile a dictionary where the keys - are the attributes and the values are the aggregated input per attribute. - For example: - Input: - [ - ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...), - ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...), - ] - Output: - { - "genre": ["Rock", "Hip-hop"], - "description": ["A rock song with a guitar solo", "A hip-hop verse"] - } - """ - batch_per_attribute: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list) - - def _merge_conds(cond, merge_text_conditions_p=0, drop_desc_p=0): - def is_valid(k, v): - k_valid = k in ['key', 'bpm', 'genre', 'moods', 'instrument'] - v_valid = v is not None and isinstance(v, (int, float, str, list)) - return k_valid and v_valid - - def process_value(v): - if isinstance(v, (int, float, str)): - return v - if isinstance(v, list): - return ", ".join(v) - else: - RuntimeError(f"unknown type for text value! ({type(v), v})") - - desc = cond.text['description'] - meta_data = "" - if random.uniform(0, 1) < merge_text_conditions_p: - meta_pairs = [f'{k}: {process_value(v)}' for k, v in cond.text.items() if is_valid(k, v)] - random.shuffle(meta_pairs) - meta_data = ". ".join(meta_pairs) - desc = desc if not random.uniform(0, 1) < drop_desc_p else None - - if desc is None: - desc = meta_data if len(meta_data) > 1 else None - else: - desc = desc.rstrip('.') + ". " + meta_data - cond.text['description'] = desc.strip() if desc else None - - if self.training and self.merge_text_conditions_p: - for sample in samples: - _merge_conds(sample, self.merge_text_conditions_p, self.drop_desc_p) - - texts = [x.text for x in samples] - for text in texts: - for condition in self.text_conditions: - batch_per_attribute[condition].append(text[condition]) - - return batch_per_attribute - - def _collate_wavs(self, samples: tp.List[ConditioningAttributes]): - """Generate a dict where the keys are attributes by which we fetch similar wavs, - and the values are Tensors of wavs according to said attribtues. - - *Note*: by the time the samples reach this function, each sample should have some waveform - inside the "wav" attribute. It should be either: - 1. A real waveform - 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset) - 3. A null waveform due to it being dropped in a dropout module (nullified by dropout) - - Args: - samples (tp.List[ConditioningAttributes]): List of ConditioningAttributes samples. - Returns: - dict: A dicionary mapping an attribute name to wavs. 
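The collation step in _collate_text is essentially a transpose: a list of per-sample attribute dicts becomes one list per attribute, which is the layout the text tokenizers expect (missing values stay None and later become pad tokens). A self-contained sketch with made-up samples:

from collections import defaultdict

samples = [
    {"genre": "Rock", "description": "A rock song with a guitar solo"},
    {"genre": "Hip-hop", "description": "A hip-hop verse"},
    {"genre": None, "description": "An ambient pad"},
]
batch_per_attribute = defaultdict(list)
for text in samples:
    for condition in ("genre", "description"):
        batch_per_attribute[condition].append(text.get(condition))
# {'genre': ['Rock', 'Hip-hop', None], 'description': ['A rock song with a guitar solo', ...]}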
- """ - wavs = defaultdict(list) - lens = defaultdict(list) - paths = defaultdict(list) - out = {} - - for sample in samples: - for attribute in self.wav_conditions: - wav, length, path = sample.wav[attribute] - wavs[attribute].append(wav.flatten()) - lens[attribute].append(length) - paths[attribute].append(path) - - # stack all wavs to a single tensor - for attribute in self.wav_conditions: - stacked_wav, _ = collate(wavs[attribute], dim=0) - out[attribute] = WavCondition(stacked_wav.unsqueeze(1), - torch.cat(lens['self_wav']), paths[attribute]) # type: ignore - - return out - - -class ConditionFuser(StreamingModule): - """Condition fuser handles the logic to combine the different conditions - to the actual model input. - - Args: - fuse2cond (tp.Dict[str, str]): A dictionary that says how to fuse - each condition. For example: - { - "prepend": ["description"], - "sum": ["genre", "bpm"], - "cross": ["description"], - } - cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention. - cross_attention_pos_emb_scale (int): Scale for positional embeddings in cross attention if used. - """ - FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"] - - def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False, - cross_attention_pos_emb_scale: float = 1.0): - super().__init__() - assert all( - [k in self.FUSING_METHODS for k in fuse2cond.keys()] - ), f"got invalid fuse method, allowed methods: {self.FUSING_MEHTODS}" - self.cross_attention_pos_emb = cross_attention_pos_emb - self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale - self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond - self.cond2fuse: tp.Dict[str, str] = {} - for fuse_method, conditions in fuse2cond.items(): - for condition in conditions: - self.cond2fuse[condition] = fuse_method - - def forward( - self, - input: Tensor, - conditions: tp.Dict[str, ConditionType] - ) -> tp.Tuple[Tensor, tp.Optional[Tensor]]: - """Fuse the conditions to the provided model input. - - Args: - input (Tensor): Transformer input. - conditions (tp.Dict[str, ConditionType]): Dict of conditions. - Returns: - tp.Tuple[Tensor, Tensor]: The first tensor is the transformer input - after the conditions have been fused. The second output tensor is the tensor - used for cross-attention or None if no cross attention inputs exist. 
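The ConditionFuser configuration above is inverted once at construction time: fuse2cond lists the conditions per fusing method, and cond2fuse maps each condition name back to its method so forward() can dispatch per condition. A small sketch using the example config from the docstring; note that a condition listed under two methods keeps only the last one in this simple inversion.

fuse2cond = {
    "prepend": ["description"],
    "sum": ["genre", "bpm"],
    "cross": ["description"],
}
cond2fuse = {}
for fuse_method, conditions in fuse2cond.items():
    for condition in conditions:
        cond2fuse[condition] = fuse_method
# cond2fuse == {'description': 'cross', 'genre': 'sum', 'bpm': 'sum'}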
- """ - B, T, _ = input.shape - - if 'offsets' in self._streaming_state: - first_step = False - offsets = self._streaming_state['offsets'] - else: - first_step = True - offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device) - - assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \ - f"given conditions contain unknown attributes for fuser, " \ - f"expected {self.cond2fuse.keys()}, got {conditions.keys()}" - cross_attention_output = None - for cond_type, (cond, cond_mask) in conditions.items(): - op = self.cond2fuse[cond_type] - if op == "sum": - input += cond - elif op == "input_interpolate": - cond = rearrange(cond, "b t d -> b d t") - cond = F.interpolate(cond, size=input.shape[1]) - input += rearrange(cond, "b d t -> b t d") - elif op == "prepend": - if first_step: - input = torch.cat([cond, input], dim=1) - elif op == "cross": - if cross_attention_output is not None: - cross_attention_output = torch.cat([cross_attention_output, cond], dim=1) - else: - cross_attention_output = cond - else: - raise ValueError(f"unknown op ({op})") - - if self.cross_attention_pos_emb and cross_attention_output is not None: - positions = torch.arange( - cross_attention_output.shape[1], - device=cross_attention_output.device - ).view(1, -1, 1) - pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1]) - cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return input, cross_attention_output diff --git a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/collapsible.tsx b/spaces/jordonpeter01/ai-comic-factory/src/components/ui/collapsible.tsx deleted file mode 100644 index 9fa48946afd1eb56bd932377fd888e3986304676..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/collapsible.tsx +++ /dev/null @@ -1,11 +0,0 @@ -"use client" - -import * as CollapsiblePrimitive from "@radix-ui/react-collapsible" - -const Collapsible = CollapsiblePrimitive.Root - -const CollapsibleTrigger = CollapsiblePrimitive.CollapsibleTrigger - -const CollapsibleContent = CollapsiblePrimitive.CollapsibleContent - -export { Collapsible, CollapsibleTrigger, CollapsibleContent } diff --git a/spaces/josedolot/HybridNet_Demo2/utils/sync_batchnorm/comm.py b/spaces/josedolot/HybridNet_Demo2/utils/sync_batchnorm/comm.py deleted file mode 100644 index 922f8c4a3adaa9b32fdcaef09583be03b0d7eb2b..0000000000000000000000000000000000000000 --- a/spaces/josedolot/HybridNet_Demo2/utils/sync_batchnorm/comm.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -# File : comm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import queue -import collections -import threading - -__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster'] - - -class FutureResult(object): - """A thread-safe future implementation. Used only as one-to-one pipe.""" - - def __init__(self): - self._result = None - self._lock = threading.Lock() - self._cond = threading.Condition(self._lock) - - def put(self, result): - with self._lock: - assert self._result is None, 'Previous result has\'t been fetched.' 
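To make the two most common fusing ops concrete, assume a transformer input of shape [B, T, D] and a text condition of shape [B, T_c, D]: "prepend" concatenates the condition along time on the first step, while "cross" routes it to a separate tensor used as cross-attention memory. The shapes below are arbitrary.

import torch

B, T, T_c, D = 2, 8, 4, 16
x = torch.randn(B, T, D)
cond = torch.randn(B, T_c, D)

prepended = torch.cat([cond, x], dim=1)   # [B, T_c + T, D], fed to the transformer itself
cross_src = cond                          # [B, T_c, D], consumed by cross-attention layers
assert prepended.shape == (2, 12, 16)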
- self._result = result - self._cond.notify() - - def get(self): - with self._lock: - if self._result is None: - self._cond.wait() - - res = self._result - self._result = None - return res - - -_MasterRegistry = collections.namedtuple('MasterRegistry', ['result']) -_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result']) - - -class SlavePipe(_SlavePipeBase): - """Pipe for master-slave communication.""" - - def run_slave(self, msg): - self.queue.put((self.identifier, msg)) - ret = self.result.get() - self.queue.put(True) - return ret - - -class SyncMaster(object): - """An abstract `SyncMaster` object. - - - During the replication, as the data parallel will trigger an callback of each module, all slave devices should - call `register(id)` and obtain an `SlavePipe` to communicate with the master. - - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected, - and passed to a registered callback. - - After receiving the messages, the master device should gather the information and determine to message passed - back to each slave devices. - """ - - def __init__(self, master_callback): - """ - - Args: - master_callback: a callback to be invoked after having collected messages from slave devices. - """ - self._master_callback = master_callback - self._queue = queue.Queue() - self._registry = collections.OrderedDict() - self._activated = False - - def __getstate__(self): - return {'master_callback': self._master_callback} - - def __setstate__(self, state): - self.__init__(state['master_callback']) - - def register_slave(self, identifier): - """ - Register an slave device. - - Args: - identifier: an identifier, usually is the device id. - - Returns: a `SlavePipe` object which can be used to communicate with the master device. - - """ - if self._activated: - assert self._queue.empty(), 'Queue is not clean before next initialization.' - self._activated = False - self._registry.clear() - future = FutureResult() - self._registry[identifier] = _MasterRegistry(future) - return SlavePipe(identifier, self._queue, future) - - def run_master(self, master_msg): - """ - Main entry for the master device in each forward pass. - The messages were first collected from each devices (including the master device), and then - an callback will be invoked to compute the message to be sent back to each devices - (including the master device). - - Args: - master_msg: the message that the master want to send to itself. This will be placed as the first - message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example. - - Returns: the message to be sent back to the master device. - - """ - self._activated = True - - intermediates = [(0, master_msg)] - for i in range(self.nr_slaves): - intermediates.append(self._queue.get()) - - results = self._master_callback(intermediates) - assert results[0][0] == 0, 'The first result should belongs to the master.' 
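FutureResult above is a one-to-one pipe: get() blocks until another thread calls put(). A usage sketch, assuming the class defined in this file is in scope:

import threading
import time

future = FutureResult()

def producer():
    time.sleep(0.1)
    future.put(42)

t = threading.Thread(target=producer)
t.start()
value = future.get()   # blocks until the producer thread delivers a result
t.join()
assert value == 42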
- - for i, res in results: - if i == 0: - continue - self._registry[i].result.put(res) - - for i in range(self.nr_slaves): - assert self._queue.get() is True - - return results[0][1] - - @property - def nr_slaves(self): - return len(self._registry) diff --git a/spaces/julien-c/push-model-from-web/mobilenet/README.md b/spaces/julien-c/push-model-from-web/mobilenet/README.md deleted file mode 100644 index 2a1230fc7e939cd880641fe54489a89f482db381..0000000000000000000000000000000000000000 --- a/spaces/julien-c/push-model-from-web/mobilenet/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -license: mit -tags: - - image-classification - - tfjs ---- - -## TensorFlow.js version of Mobilenet - -Pushed from Web - -![](coffee.jpg) diff --git a/spaces/kazumak/webui/app.py b/spaces/kazumak/webui/app.py deleted file mode 100644 index 9aaf59d293dce427101bb90b54274afd589cedfd..0000000000000000000000000000000000000000 --- a/spaces/kazumak/webui/app.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -from subprocess import getoutput - -gpu_info = getoutput('nvidia-smi') -if("A10G" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl") -elif("T4" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl") - -os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui") -os.chdir("/home/user/app/stable-diffusion-webui") - -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py") -os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? 
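Putting SyncMaster and SlavePipe together (assuming both classes from comm.py above are in scope), the pattern can be exercised with plain threads instead of data-parallel replicas: each slave pushes its message through run_slave, the master callback sees all messages at once, and every device receives a result. Here the callback simply sums the messages; the thread setup is illustrative.

import threading

def master_callback(intermediates):
    total = sum(msg for _, msg in intermediates)
    # one result per device; the master's entry (identifier 0) must stay first
    return [(identifier, total) for identifier, _ in intermediates]

master = SyncMaster(master_callback)
pipes = [master.register_slave(i) for i in range(1, 3)]   # two slave devices

slave_results = {}
def slave(pipe, msg):
    slave_results[pipe.identifier] = pipe.run_slave(msg)

threads = [threading.Thread(target=slave, args=(p, p.identifier * 10)) for p in pipes]
for t in threads:
    t.start()
master_result = master.run_master(1)   # the master's own message
for t in threads:
    t.join()
assert master_result == slave_results[1] == slave_results[2] == 31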
document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''') -os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/shared.demo.launch/shared.demo.queue().launch/g' /home/user/app/stable-diffusion-webui/webui.py") -#os.system(f"sed -i -e 's/inputs=\[component\],/&\\n queue=False,/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -#os.system(f"sed -i -e 's/outputs=\[token_counter\]/outputs=[token_counter], queue=False/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/outputs=\[/queue=False, &/g' home/user/app/stable-diffusion-webui/modules/ui.py") - -# ----------------------------Please duplicate this space and delete this block if you don't want to see the extra header---------------------------- -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/header_patch.py -O /home/user/app/header_patch.py") -os.system(f"sed -i -e '/demo:/r /home/user/app/header_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -# --------------------------------------------------------------------------------------------------------------------------------------------------- - -#os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt") -#os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt") -os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt") - -if "IS_SHARED_UI" in os.environ: - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json") - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json") - os.system(f"python launch.py --force-enable-xformers --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding") -else: - # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py") - os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py") - - # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME") - #os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui-artists-to-study /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-artists-to-study") - os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser 
/home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser") - os.system(f"git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /home/user/app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui") - - # Please duplicate this space and delete # character in front of the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/Arcane-Diffusion/resolve/main/arcane-diffusion-v3.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/arcane-diffusion-v3.ckpt") - #os.system(f"wget -q https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/Cyberpunk-Anime-Diffusion.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Cyberpunk-Anime-Diffusion.ckpt") - #os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/mo-di-diffusion/resolve/main/moDi-v1-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/moDi-v1-pruned.ckpt") - #os.system(f"wget -q https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/resolve/main/PaperCut_v1.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PaperCut_v1.ckpt") - #os.system(f"wget -q https://huggingface.co/lilpotat/sa/resolve/main/samdoesarts_style.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/samdoesarts_style.ckpt") - #os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float32.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/wd-v1-3-float32.ckpt") - #os.system(f"wget -q https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-5-inpainting.ckpt") - #os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.ckpt") - #os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.yaml") - os.system(f"python launch.py --force-enable-xformers --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --disable-console-progressbars --enable-console-prompts --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --api --skip-torch-cuda-test --disable-safe-unpickle") diff --git a/spaces/kdrkdrkdr/HinaTTS/export_model.py b/spaces/kdrkdrkdr/HinaTTS/export_model.py deleted file mode 100644 index 98a49835df5a7a2486e76ddf94fbbb4444b52203..0000000000000000000000000000000000000000 --- a/spaces/kdrkdrkdr/HinaTTS/export_model.py +++ /dev/null @@ -1,13 +0,0 @@ -import 
torch - -if __name__ == '__main__': - model_path = "saved_model/11/model.pth" - output_path = "saved_model/11/model1.pth" - checkpoint_dict = torch.load(model_path, map_location='cpu') - checkpoint_dict_new = {} - for k, v in checkpoint_dict.items(): - if k == "optimizer": - print("remove optimizer") - continue - checkpoint_dict_new[k] = v - torch.save(checkpoint_dict_new, output_path) diff --git a/spaces/kerls/is-this-food-photo-instagram-worthy/app.py b/spaces/kerls/is-this-food-photo-instagram-worthy/app.py deleted file mode 100644 index b3c097b46baefb77b8ea88ef8b2fb793b901f7b9..0000000000000000000000000000000000000000 --- a/spaces/kerls/is-this-food-photo-instagram-worthy/app.py +++ /dev/null @@ -1,20 +0,0 @@ -from fastai.vision.all import * -import gradio as gr - -# def greet(name): -# return "Hello " + name + "!!" - -# iface = gr.Interface(fn=greet, inputs="text", outputs="text") -# iface.launch() - -learn = load_learner('export.pkl') - -labels = learn.dls.vocab -def predict(img): - img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - -examples = ['aesthetic-pancakes.jpg', 'messy-oatmeal.png', 'messy-plating.png'] - -gr.Interface(fn=predict, inputs=gr.Image(shape=(512, 512)), outputs=gr.Label(num_top_classes=3), examples=examples).launch(share=False) diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/gradio_demo.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/gradio_demo.py deleted file mode 100644 index 1e70005831b9f29dc3c7f39642364bc325a4c8a4..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/gradio_demo.py +++ /dev/null @@ -1,155 +0,0 @@ -import torch, uuid -import os, sys, shutil -from src.utils.preprocess import CropAndExtract -from src.test_audio2coeff import Audio2Coeff -from src.facerender.animate import AnimateFromCoeff -from src.generate_batch import get_data -from src.generate_facerender_batch import get_facerender_data - -from src.utils.init_path import init_path - -from pydub import AudioSegment - - -def mp3_to_wav(mp3_filename,wav_filename,frame_rate): - mp3_file = AudioSegment.from_file(file=mp3_filename) - mp3_file.set_frame_rate(frame_rate).export(wav_filename,format="wav") - - -class SadTalker(): - - def __init__(self, checkpoint_path='checkpoints', config_path='src/config', lazy_load=False): - - if torch.cuda.is_available() : - device = "cuda" - else: - device = "cpu" - - self.device = device - - os.environ['TORCH_HOME']= checkpoint_path - - self.checkpoint_path = checkpoint_path - self.config_path = config_path - - - def test(self, source_image, driven_audio, preprocess='crop', - still_mode=False, use_enhancer=False, batch_size=1, size=256, - pose_style = 0, exp_scale=1.0, - use_ref_video = False, - ref_video = None, - ref_info = None, - use_idle_mode = False, - length_of_audio = 0, use_blink=True, - result_dir='./results/'): - - self.sadtalker_paths = init_path(self.checkpoint_path, self.config_path, size, False, preprocess) - print(self.sadtalker_paths) - - self.audio_to_coeff = Audio2Coeff(self.sadtalker_paths, self.device) - self.preprocess_model = CropAndExtract(self.sadtalker_paths, self.device) - self.animate_from_coeff = AnimateFromCoeff(self.sadtalker_paths, self.device) - - time_tag = str(uuid.uuid4()) - save_dir = os.path.join(result_dir, time_tag) - os.makedirs(save_dir, exist_ok=True) - - input_dir = os.path.join(save_dir, 'input') - os.makedirs(input_dir, exist_ok=True) - - print(source_image) - pic_path = 
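The export_model.py snippet above follows a common checkpoint-slimming pattern: load the training checkpoint, drop the optimizer state (only needed to resume training), and save the smaller inference-only file. A reusable sketch with placeholder paths:

import torch

def strip_optimizer(model_path: str, output_path: str) -> None:
    checkpoint = torch.load(model_path, map_location="cpu")
    slim = {k: v for k, v in checkpoint.items() if k != "optimizer"}
    torch.save(slim, output_path)

# strip_optimizer("saved_model/11/model.pth", "saved_model/11/model_slim.pth")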
os.path.join(input_dir, os.path.basename(source_image)) - shutil.move(source_image, input_dir) - - if driven_audio is not None and os.path.isfile(driven_audio): - audio_path = os.path.join(input_dir, os.path.basename(driven_audio)) - - #### mp3 to wav - if '.mp3' in audio_path: - mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000) - audio_path = audio_path.replace('.mp3', '.wav') - else: - shutil.move(driven_audio, input_dir) - - elif use_idle_mode: - audio_path = os.path.join(input_dir, 'idlemode_'+str(length_of_audio)+'.wav') ## generate audio from this new audio_path - from pydub import AudioSegment - one_sec_segment = AudioSegment.silent(duration=1000*length_of_audio) #duration in milliseconds - one_sec_segment.export(audio_path, format="wav") - else: - print(use_ref_video, ref_info) - assert use_ref_video == True and ref_info == 'all' - - if use_ref_video and ref_info == 'all': # full ref mode - ref_video_videoname = os.path.basename(ref_video) - audio_path = os.path.join(save_dir, ref_video_videoname+'.wav') - print('new audiopath:',audio_path) - # if ref_video contains audio, set the audio from ref_video. - cmd = r"ffmpeg -y -hide_banner -loglevel error -i %s %s"%(ref_video, audio_path) - os.system(cmd) - - os.makedirs(save_dir, exist_ok=True) - - #crop image and extract 3dmm from image - first_frame_dir = os.path.join(save_dir, 'first_frame_dir') - os.makedirs(first_frame_dir, exist_ok=True) - first_coeff_path, crop_pic_path, crop_info = self.preprocess_model.generate(pic_path, first_frame_dir, preprocess, True, size) - - if first_coeff_path is None: - raise AttributeError("No face is detected") - - if use_ref_video: - print('using ref video for genreation') - ref_video_videoname = os.path.splitext(os.path.split(ref_video)[-1])[0] - ref_video_frame_dir = os.path.join(save_dir, ref_video_videoname) - os.makedirs(ref_video_frame_dir, exist_ok=True) - print('3DMM Extraction for the reference video providing pose') - ref_video_coeff_path, _, _ = self.preprocess_model.generate(ref_video, ref_video_frame_dir, preprocess, source_image_flag=False) - else: - ref_video_coeff_path = None - - if use_ref_video: - if ref_info == 'pose': - ref_pose_coeff_path = ref_video_coeff_path - ref_eyeblink_coeff_path = None - elif ref_info == 'blink': - ref_pose_coeff_path = None - ref_eyeblink_coeff_path = ref_video_coeff_path - elif ref_info == 'pose+blink': - ref_pose_coeff_path = ref_video_coeff_path - ref_eyeblink_coeff_path = ref_video_coeff_path - elif ref_info == 'all': - ref_pose_coeff_path = None - ref_eyeblink_coeff_path = None - else: - raise('error in refinfo') - else: - ref_pose_coeff_path = None - ref_eyeblink_coeff_path = None - - #audio2ceoff - if use_ref_video and ref_info == 'all': - coeff_path = ref_video_coeff_path # self.audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path) - else: - batch = get_data(first_coeff_path, audio_path, self.device, ref_eyeblink_coeff_path=ref_eyeblink_coeff_path, still=still_mode, idlemode=use_idle_mode, length_of_audio=length_of_audio, use_blink=use_blink) # longer audio? 
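The idle-mode branch above synthesizes a silent WAV with pydub when no driving audio is provided, so the rest of the pipeline can run unchanged. In isolation (durations for AudioSegment.silent are in milliseconds; the output file name is illustrative):

from pydub import AudioSegment

length_of_audio = 5  # seconds
silence = AudioSegment.silent(duration=1000 * length_of_audio)
silence.export("idlemode_5.wav", format="wav")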
- coeff_path = self.audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path) - - #coeff2video - data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode, preprocess=preprocess, size=size, expression_scale = exp_scale) - return_path = self.animate_from_coeff.generate(data, save_dir, pic_path, crop_info, enhancer='gfpgan' if use_enhancer else None, preprocess=preprocess, img_size=size) - video_name = data['video_name'] - print(f'The generated video is named {video_name} in {save_dir}') - - del self.preprocess_model - del self.audio_to_coeff - del self.animate_from_coeff - - if torch.cuda.is_available(): - torch.cuda.empty_cache() - torch.cuda.synchronize() - - import gc; gc.collect() - - return return_path - - \ No newline at end of file diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/util/generate_list.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/util/generate_list.py deleted file mode 100644 index 943d906781063c3584a7e5b5c784f8aac0694985..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/util/generate_list.py +++ /dev/null @@ -1,34 +0,0 @@ -"""This script is to generate training list files for Deep3DFaceRecon_pytorch -""" - -import os - -# save path to training data -def write_list(lms_list, imgs_list, msks_list, mode='train',save_folder='datalist', save_name=''): - save_path = os.path.join(save_folder, mode) - if not os.path.isdir(save_path): - os.makedirs(save_path) - with open(os.path.join(save_path, save_name + 'landmarks.txt'), 'w') as fd: - fd.writelines([i + '\n' for i in lms_list]) - - with open(os.path.join(save_path, save_name + 'images.txt'), 'w') as fd: - fd.writelines([i + '\n' for i in imgs_list]) - - with open(os.path.join(save_path, save_name + 'masks.txt'), 'w') as fd: - fd.writelines([i + '\n' for i in msks_list]) - -# check if the path is valid -def check_list(rlms_list, rimgs_list, rmsks_list): - lms_list, imgs_list, msks_list = [], [], [] - for i in range(len(rlms_list)): - flag = 'false' - lm_path = rlms_list[i] - im_path = rimgs_list[i] - msk_path = rmsks_list[i] - if os.path.isfile(lm_path) and os.path.isfile(im_path) and os.path.isfile(msk_path): - flag = 'true' - lms_list.append(rlms_list[i]) - imgs_list.append(rimgs_list[i]) - msks_list.append(rmsks_list[i]) - print(i, rlms_list[i], flag) - return lms_list, imgs_list, msks_list diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/cnn/resnet.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/cnn/resnet.py deleted file mode 100644 index 1cb3ac057ee2d52c46fc94685b5d4e698aad8d5f..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/cnn/resnet.py +++ /dev/null @@ -1,316 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
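The check_list routine in generate_list.py boils down to keeping only the (landmark, image, mask) triplets whose three files all exist, so the training lists never point at missing data. A compact sketch with illustrative names:

import os

def filter_triplets(lms, imgs, msks):
    kept = []
    for lm_path, im_path, msk_path in zip(lms, imgs, msks):
        if all(os.path.isfile(p) for p in (lm_path, im_path, msk_path)):
            kept.append((lm_path, im_path, msk_path))
    return kept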
-import logging - -import torch.nn as nn -import torch.utils.checkpoint as cp - -from .utils import constant_init, kaiming_init - - -def conv3x3(in_planes, out_planes, stride=1, dilation=1): - """3x3 convolution with padding.""" - return nn.Conv2d( - in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False): - super(BasicBlock, self).__init__() - assert style in ['pytorch', 'caffe'] - self.conv1 = conv3x3(inplanes, planes, stride, dilation) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - assert not with_cp - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False): - """Bottleneck block. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. - """ - super(Bottleneck, self).__init__() - assert style in ['pytorch', 'caffe'] - if style == 'pytorch': - conv1_stride = 1 - conv2_stride = stride - else: - conv1_stride = stride - conv2_stride = 1 - self.conv1 = nn.Conv2d( - inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False) - self.conv2 = nn.Conv2d( - planes, - planes, - kernel_size=3, - stride=conv2_stride, - padding=dilation, - dilation=dilation, - bias=False) - - self.bn1 = nn.BatchNorm2d(planes) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d( - planes, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.with_cp = with_cp - - def forward(self, x): - - def _inner_forward(x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -def make_res_layer(block, - inplanes, - planes, - blocks, - stride=1, - dilation=1, - style='pytorch', - with_cp=False): - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - inplanes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append( - block( - inplanes, - planes, - stride, - dilation, - downsample, - style=style, - with_cp=with_cp)) - inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)) - - return 
nn.Sequential(*layers) - - -class ResNet(nn.Module): - """ResNet backbone. - - Args: - depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. - num_stages (int): Resnet stages, normally 4. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze - running stats (mean and var). - bn_frozen (bool): Whether to freeze weight and bias of BN layers. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - """ - - arch_settings = { - 18: (BasicBlock, (2, 2, 2, 2)), - 34: (BasicBlock, (3, 4, 6, 3)), - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, - depth, - num_stages=4, - strides=(1, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(0, 1, 2, 3), - style='pytorch', - frozen_stages=-1, - bn_eval=True, - bn_frozen=False, - with_cp=False): - super(ResNet, self).__init__() - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for resnet') - assert num_stages >= 1 and num_stages <= 4 - block, stage_blocks = self.arch_settings[depth] - stage_blocks = stage_blocks[:num_stages] - assert len(strides) == len(dilations) == num_stages - assert max(out_indices) < num_stages - - self.out_indices = out_indices - self.style = style - self.frozen_stages = frozen_stages - self.bn_eval = bn_eval - self.bn_frozen = bn_frozen - self.with_cp = with_cp - - self.inplanes = 64 - self.conv1 = nn.Conv2d( - 3, 64, kernel_size=7, stride=2, padding=3, bias=False) - self.bn1 = nn.BatchNorm2d(64) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.res_layers = [] - for i, num_blocks in enumerate(stage_blocks): - stride = strides[i] - dilation = dilations[i] - planes = 64 * 2**i - res_layer = make_res_layer( - block, - self.inplanes, - planes, - num_blocks, - stride=stride, - dilation=dilation, - style=self.style, - with_cp=with_cp) - self.inplanes = planes * block.expansion - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - from ..runner import load_checkpoint - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, nn.BatchNorm2d): - constant_init(m, 1) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - if len(outs) == 1: - return outs[0] - else: - return tuple(outs) - - def train(self, mode=True): - super(ResNet, self).train(mode) - if self.bn_eval: - for m in self.modules(): - if isinstance(m, 
nn.BatchNorm2d): - m.eval() - if self.bn_frozen: - for params in m.parameters(): - params.requires_grad = False - if mode and self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for param in self.bn1.parameters(): - param.requires_grad = False - self.bn1.eval() - self.bn1.weight.requires_grad = False - self.bn1.bias.requires_grad = False - for i in range(1, self.frozen_stages + 1): - mod = getattr(self, f'layer{i}') - mod.eval() - for param in mod.parameters(): - param.requires_grad = False diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/parallel/registry.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/parallel/registry.py deleted file mode 100644 index a204a07fba10e614223f090d1a57cf9c4d74d4a1..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/parallel/registry.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from torch.nn.parallel import DataParallel, DistributedDataParallel - -from annotator.uniformer.mmcv.utils import Registry - -MODULE_WRAPPERS = Registry('module wrapper') -MODULE_WRAPPERS.register_module(module=DataParallel) -MODULE_WRAPPERS.register_module(module=DistributedDataParallel) diff --git a/spaces/kpyuy/chat/app.py b/spaces/kpyuy/chat/app.py deleted file mode 100644 index 37e8f5ffc2ed7e157891e9eff4acfff88f6305e2..0000000000000000000000000000000000000000 --- a/spaces/kpyuy/chat/app.py +++ /dev/null @@ -1,62 +0,0 @@ -import gradio as gr -import openai -import os - -openai.api_key = os.environ.get("OPENAI_API_KEY") - - -class Conversation: - def __init__(self, prompt, num_of_round): - self.prompt = prompt - self.num_of_round = num_of_round - self.messages = [] - self.messages.append({"role": "system", "content": self.prompt}) - - def ask(self, question): - try: - self.messages.append({"role": "user", "content": question}) - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=self.messages, - temperature=0.5, - max_tokens=2048, - top_p=1, - ) - except Exception as e: - print(e) - return e - - message = response["choices"][0]["message"]["content"] - self.messages.append({"role": "assistant", "content": message}) - - if len(self.messages) > self.num_of_round * 2 + 1: - del self.messages[1:3] # Remove the first round conversation left. - return message - - -prompt = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求: -1. 你的回答必须是中文 -2. 
回答限制在100个字以内""" - -conv = Conversation(prompt, 10) - - -def answer(question, history=[]): - history.append(question) - response = conv.ask(question) - history.append(response) - responses = [(u, b) for u, b in zip(history[::2], history[1::2])] - return responses, history - - -with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as demo: - chatbot = gr.Chatbot(elem_id="chatbot") - state = gr.State([]) - - with gr.Row(): - txt = gr.Textbox(show_label=False, - placeholder="Enter text and press enter").style(container=False) - - txt.submit(answer, [txt, state], [chatbot, state]) - -demo.launch() diff --git a/spaces/krystaltechnology/image-video-colorization/models/deep_colorization/colorizers/util.py b/spaces/krystaltechnology/image-video-colorization/models/deep_colorization/colorizers/util.py deleted file mode 100644 index 79968ba6b960a8c10047f1ce52400b6bfe766b9c..0000000000000000000000000000000000000000 --- a/spaces/krystaltechnology/image-video-colorization/models/deep_colorization/colorizers/util.py +++ /dev/null @@ -1,47 +0,0 @@ - -from PIL import Image -import numpy as np -from skimage import color -import torch -import torch.nn.functional as F -from IPython import embed - -def load_img(img_path): - out_np = np.asarray(Image.open(img_path)) - if(out_np.ndim==2): - out_np = np.tile(out_np[:,:,None],3) - return out_np - -def resize_img(img, HW=(256,256), resample=3): - return np.asarray(Image.fromarray(img).resize((HW[1],HW[0]), resample=resample)) - -def preprocess_img(img_rgb_orig, HW=(256,256), resample=3): - # return original size L and resized L as torch Tensors - img_rgb_rs = resize_img(img_rgb_orig, HW=HW, resample=resample) - - img_lab_orig = color.rgb2lab(img_rgb_orig) - img_lab_rs = color.rgb2lab(img_rgb_rs) - - img_l_orig = img_lab_orig[:,:,0] - img_l_rs = img_lab_rs[:,:,0] - - tens_orig_l = torch.Tensor(img_l_orig)[None,None,:,:] - tens_rs_l = torch.Tensor(img_l_rs)[None,None,:,:] - - return (tens_orig_l, tens_rs_l) - -def postprocess_tens(tens_orig_l, out_ab, mode='bilinear'): - # tens_orig_l 1 x 1 x H_orig x W_orig - # out_ab 1 x 2 x H x W - - HW_orig = tens_orig_l.shape[2:] - HW = out_ab.shape[2:] - - # call resize function if needed - if(HW_orig[0]!=HW[0] or HW_orig[1]!=HW[1]): - out_ab_orig = F.interpolate(out_ab, size=HW_orig, mode='bilinear') - else: - out_ab_orig = out_ab - - out_lab_orig = torch.cat((tens_orig_l, out_ab_orig), dim=1) - return color.lab2rgb(out_lab_orig.data.cpu().numpy()[0,...].transpose((1,2,0))) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/ImageDraw.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/ImageDraw.py deleted file mode 100644 index 8adcc87de51650edfff0a33974e607cc82f98344..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/ImageDraw.py +++ /dev/null @@ -1,1127 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# drawing interface operations -# -# History: -# 1996-04-13 fl Created (experimental) -# 1996-08-07 fl Filled polygons, ellipses. 
-# 1996-08-13 fl Added text support -# 1998-06-28 fl Handle I and F images -# 1998-12-29 fl Added arc; use arc primitive to draw ellipses -# 1999-01-10 fl Added shape stuff (experimental) -# 1999-02-06 fl Added bitmap support -# 1999-02-11 fl Changed all primitives to take options -# 1999-02-20 fl Fixed backwards compatibility -# 2000-10-12 fl Copy on write, when necessary -# 2001-02-18 fl Use default ink for bitmap/text also in fill mode -# 2002-10-24 fl Added support for CSS-style color strings -# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing -# 2002-12-11 fl Refactored low-level drawing API (work in progress) -# 2004-08-26 fl Made Draw() a factory function, added getdraw() support -# 2004-09-04 fl Added width support to line primitive -# 2004-09-10 fl Added font mode handling -# 2006-06-19 fl Added font bearing support (getmask2) -# -# Copyright (c) 1997-2006 by Secret Labs AB -# Copyright (c) 1996-2006 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import math -import numbers -import warnings - -from . import Image, ImageColor -from ._deprecate import deprecate - -""" -A simple 2D drawing interface for PIL images. -

        -Application code should use the Draw factory, instead of -directly. -""" - - -class ImageDraw: - font = None - - def __init__(self, im, mode=None): - """ - Create a drawing instance. - - :param im: The image to draw in. - :param mode: Optional mode to use for color values. For RGB - images, this argument can be RGB or RGBA (to blend the - drawing into the image). For all other modes, this argument - must be the same as the image mode. If omitted, the mode - defaults to the mode of the image. - """ - im.load() - if im.readonly: - im._copy() # make it writeable - blend = 0 - if mode is None: - mode = im.mode - if mode != im.mode: - if mode == "RGBA" and im.mode == "RGB": - blend = 1 - else: - msg = "mode mismatch" - raise ValueError(msg) - if mode == "P": - self.palette = im.palette - else: - self.palette = None - self._image = im - self.im = im.im - self.draw = Image.core.draw(self.im, blend) - self.mode = mode - if mode in ("I", "F"): - self.ink = self.draw.draw_ink(1) - else: - self.ink = self.draw.draw_ink(-1) - if mode in ("1", "P", "I", "F"): - # FIXME: fix Fill2 to properly support matte for I+F images - self.fontmode = "1" - else: - self.fontmode = "L" # aliasing is okay for other modes - self.fill = False - - def getfont(self): - """ - Get the current default font. - - To set the default font for this ImageDraw instance:: - - from PIL import ImageDraw, ImageFont - draw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf") - - To set the default font for all future ImageDraw instances:: - - from PIL import ImageDraw, ImageFont - ImageDraw.ImageDraw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf") - - If the current default font is ``None``, - it is initialized with ``ImageFont.load_default()``. - - :returns: An image font.""" - if not self.font: - # FIXME: should add a font repository - from . 
import ImageFont - - self.font = ImageFont.load_default() - return self.font - - def _getink(self, ink, fill=None): - if ink is None and fill is None: - if self.fill: - fill = self.ink - else: - ink = self.ink - else: - if ink is not None: - if isinstance(ink, str): - ink = ImageColor.getcolor(ink, self.mode) - if self.palette and not isinstance(ink, numbers.Number): - ink = self.palette.getcolor(ink, self._image) - ink = self.draw.draw_ink(ink) - if fill is not None: - if isinstance(fill, str): - fill = ImageColor.getcolor(fill, self.mode) - if self.palette and not isinstance(fill, numbers.Number): - fill = self.palette.getcolor(fill, self._image) - fill = self.draw.draw_ink(fill) - return ink, fill - - def arc(self, xy, start, end, fill=None, width=1): - """Draw an arc.""" - ink, fill = self._getink(fill) - if ink is not None: - self.draw.draw_arc(xy, start, end, ink, width) - - def bitmap(self, xy, bitmap, fill=None): - """Draw a bitmap.""" - bitmap.load() - ink, fill = self._getink(fill) - if ink is None: - ink = fill - if ink is not None: - self.draw.draw_bitmap(xy, bitmap.im, ink) - - def chord(self, xy, start, end, fill=None, outline=None, width=1): - """Draw a chord.""" - ink, fill = self._getink(outline, fill) - if fill is not None: - self.draw.draw_chord(xy, start, end, fill, 1) - if ink is not None and ink != fill and width != 0: - self.draw.draw_chord(xy, start, end, ink, 0, width) - - def ellipse(self, xy, fill=None, outline=None, width=1): - """Draw an ellipse.""" - ink, fill = self._getink(outline, fill) - if fill is not None: - self.draw.draw_ellipse(xy, fill, 1) - if ink is not None and ink != fill and width != 0: - self.draw.draw_ellipse(xy, ink, 0, width) - - def line(self, xy, fill=None, width=0, joint=None): - """Draw a line, or a connected sequence of line segments.""" - ink = self._getink(fill)[0] - if ink is not None: - self.draw.draw_lines(xy, ink, width) - if joint == "curve" and width > 4: - if not isinstance(xy[0], (list, tuple)): - xy = [tuple(xy[i : i + 2]) for i in range(0, len(xy), 2)] - for i in range(1, len(xy) - 1): - point = xy[i] - angles = [ - math.degrees(math.atan2(end[0] - start[0], start[1] - end[1])) - % 360 - for start, end in ((xy[i - 1], point), (point, xy[i + 1])) - ] - if angles[0] == angles[1]: - # This is a straight line, so no joint is required - continue - - def coord_at_angle(coord, angle): - x, y = coord - angle -= 90 - distance = width / 2 - 1 - return tuple( - p + (math.floor(p_d) if p_d > 0 else math.ceil(p_d)) - for p, p_d in ( - (x, distance * math.cos(math.radians(angle))), - (y, distance * math.sin(math.radians(angle))), - ) - ) - - flipped = ( - angles[1] > angles[0] and angles[1] - 180 > angles[0] - ) or (angles[1] < angles[0] and angles[1] + 180 > angles[0]) - coords = [ - (point[0] - width / 2 + 1, point[1] - width / 2 + 1), - (point[0] + width / 2 - 1, point[1] + width / 2 - 1), - ] - if flipped: - start, end = (angles[1] + 90, angles[0] + 90) - else: - start, end = (angles[0] - 90, angles[1] - 90) - self.pieslice(coords, start - 90, end - 90, fill) - - if width > 8: - # Cover potential gaps between the line and the joint - if flipped: - gap_coords = [ - coord_at_angle(point, angles[0] + 90), - point, - coord_at_angle(point, angles[1] + 90), - ] - else: - gap_coords = [ - coord_at_angle(point, angles[0] - 90), - point, - coord_at_angle(point, angles[1] - 90), - ] - self.line(gap_coords, fill, width=3) - - def shape(self, shape, fill=None, outline=None): - """(Experimental) Draw a shape.""" - shape.close() - ink, fill = 
self._getink(outline, fill) - if fill is not None: - self.draw.draw_outline(shape, fill, 1) - if ink is not None and ink != fill: - self.draw.draw_outline(shape, ink, 0) - - def pieslice(self, xy, start, end, fill=None, outline=None, width=1): - """Draw a pieslice.""" - ink, fill = self._getink(outline, fill) - if fill is not None: - self.draw.draw_pieslice(xy, start, end, fill, 1) - if ink is not None and ink != fill and width != 0: - self.draw.draw_pieslice(xy, start, end, ink, 0, width) - - def point(self, xy, fill=None): - """Draw one or more individual pixels.""" - ink, fill = self._getink(fill) - if ink is not None: - self.draw.draw_points(xy, ink) - - def polygon(self, xy, fill=None, outline=None, width=1): - """Draw a polygon.""" - ink, fill = self._getink(outline, fill) - if fill is not None: - self.draw.draw_polygon(xy, fill, 1) - if ink is not None and ink != fill and width != 0: - if width == 1: - self.draw.draw_polygon(xy, ink, 0, width) - else: - # To avoid expanding the polygon outwards, - # use the fill as a mask - mask = Image.new("1", self.im.size) - mask_ink = self._getink(1)[0] - - fill_im = mask.copy() - draw = Draw(fill_im) - draw.draw.draw_polygon(xy, mask_ink, 1) - - ink_im = mask.copy() - draw = Draw(ink_im) - width = width * 2 - 1 - draw.draw.draw_polygon(xy, mask_ink, 0, width) - - mask.paste(ink_im, mask=fill_im) - - im = Image.new(self.mode, self.im.size) - draw = Draw(im) - draw.draw.draw_polygon(xy, ink, 0, width) - self.im.paste(im.im, (0, 0) + im.size, mask.im) - - def regular_polygon( - self, bounding_circle, n_sides, rotation=0, fill=None, outline=None - ): - """Draw a regular polygon.""" - xy = _compute_regular_polygon_vertices(bounding_circle, n_sides, rotation) - self.polygon(xy, fill, outline) - - def rectangle(self, xy, fill=None, outline=None, width=1): - """Draw a rectangle.""" - ink, fill = self._getink(outline, fill) - if fill is not None: - self.draw.draw_rectangle(xy, fill, 1) - if ink is not None and ink != fill and width != 0: - self.draw.draw_rectangle(xy, ink, 0, width) - - def rounded_rectangle( - self, xy, radius=0, fill=None, outline=None, width=1, *, corners=None - ): - """Draw a rounded rectangle.""" - if isinstance(xy[0], (list, tuple)): - (x0, y0), (x1, y1) = xy - else: - x0, y0, x1, y1 = xy - if x1 < x0: - msg = "x1 must be greater than or equal to x0" - raise ValueError(msg) - if y1 < y0: - msg = "y1 must be greater than or equal to y0" - raise ValueError(msg) - if corners is None: - corners = (True, True, True, True) - - d = radius * 2 - - full_x, full_y = False, False - if all(corners): - full_x = d >= x1 - x0 - if full_x: - # The two left and two right corners are joined - d = x1 - x0 - full_y = d >= y1 - y0 - if full_y: - # The two top and two bottom corners are joined - d = y1 - y0 - if full_x and full_y: - # If all corners are joined, that is a circle - return self.ellipse(xy, fill, outline, width) - - if d == 0 or not any(corners): - # If the corners have no curve, - # or there are no corners, - # that is a rectangle - return self.rectangle(xy, fill, outline, width) - - r = d // 2 - ink, fill = self._getink(outline, fill) - - def draw_corners(pieslice): - if full_x: - # Draw top and bottom halves - parts = ( - ((x0, y0, x0 + d, y0 + d), 180, 360), - ((x0, y1 - d, x0 + d, y1), 0, 180), - ) - elif full_y: - # Draw left and right halves - parts = ( - ((x0, y0, x0 + d, y0 + d), 90, 270), - ((x1 - d, y0, x1, y0 + d), 270, 90), - ) - else: - # Draw four separate corners - parts = [] - for i, part in enumerate( - ( - ((x0, y0, x0 + 
d, y0 + d), 180, 270), - ((x1 - d, y0, x1, y0 + d), 270, 360), - ((x1 - d, y1 - d, x1, y1), 0, 90), - ((x0, y1 - d, x0 + d, y1), 90, 180), - ) - ): - if corners[i]: - parts.append(part) - for part in parts: - if pieslice: - self.draw.draw_pieslice(*(part + (fill, 1))) - else: - self.draw.draw_arc(*(part + (ink, width))) - - if fill is not None: - draw_corners(True) - - if full_x: - self.draw.draw_rectangle((x0, y0 + r + 1, x1, y1 - r - 1), fill, 1) - else: - self.draw.draw_rectangle((x0 + r + 1, y0, x1 - r - 1, y1), fill, 1) - if not full_x and not full_y: - left = [x0, y0, x0 + r, y1] - if corners[0]: - left[1] += r + 1 - if corners[3]: - left[3] -= r + 1 - self.draw.draw_rectangle(left, fill, 1) - - right = [x1 - r, y0, x1, y1] - if corners[1]: - right[1] += r + 1 - if corners[2]: - right[3] -= r + 1 - self.draw.draw_rectangle(right, fill, 1) - if ink is not None and ink != fill and width != 0: - draw_corners(False) - - if not full_x: - top = [x0, y0, x1, y0 + width - 1] - if corners[0]: - top[0] += r + 1 - if corners[1]: - top[2] -= r + 1 - self.draw.draw_rectangle(top, ink, 1) - - bottom = [x0, y1 - width + 1, x1, y1] - if corners[3]: - bottom[0] += r + 1 - if corners[2]: - bottom[2] -= r + 1 - self.draw.draw_rectangle(bottom, ink, 1) - if not full_y: - left = [x0, y0, x0 + width - 1, y1] - if corners[0]: - left[1] += r + 1 - if corners[3]: - left[3] -= r + 1 - self.draw.draw_rectangle(left, ink, 1) - - right = [x1 - width + 1, y0, x1, y1] - if corners[1]: - right[1] += r + 1 - if corners[2]: - right[3] -= r + 1 - self.draw.draw_rectangle(right, ink, 1) - - def _multiline_check(self, text): - split_character = "\n" if isinstance(text, str) else b"\n" - - return split_character in text - - def _multiline_split(self, text): - split_character = "\n" if isinstance(text, str) else b"\n" - - return text.split(split_character) - - def _multiline_spacing(self, font, spacing, stroke_width): - # this can be replaced with self.textbbox(...)[3] when textsize is removed - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - return ( - self.textsize( - "A", - font=font, - stroke_width=stroke_width, - )[1] - + spacing - ) - - def text( - self, - xy, - text, - fill=None, - font=None, - anchor=None, - spacing=4, - align="left", - direction=None, - features=None, - language=None, - stroke_width=0, - stroke_fill=None, - embedded_color=False, - *args, - **kwargs, - ): - """Draw text.""" - if self._multiline_check(text): - return self.multiline_text( - xy, - text, - fill, - font, - anchor, - spacing, - align, - direction, - features, - language, - stroke_width, - stroke_fill, - embedded_color, - ) - - if embedded_color and self.mode not in ("RGB", "RGBA"): - msg = "Embedded color supported only in RGB and RGBA modes" - raise ValueError(msg) - - if font is None: - font = self.getfont() - - def getink(fill): - ink, fill = self._getink(fill) - if ink is None: - return fill - return ink - - def draw_text(ink, stroke_width=0, stroke_offset=None): - mode = self.fontmode - if stroke_width == 0 and embedded_color: - mode = "RGBA" - coord = [] - start = [] - for i in range(2): - coord.append(int(xy[i])) - start.append(math.modf(xy[i])[0]) - try: - mask, offset = font.getmask2( - text, - mode, - direction=direction, - features=features, - language=language, - stroke_width=stroke_width, - anchor=anchor, - ink=ink, - start=start, - *args, - **kwargs, - ) - coord = coord[0] + offset[0], coord[1] + offset[1] - except AttributeError: - try: - mask = font.getmask( - text, - 
mode, - direction, - features, - language, - stroke_width, - anchor, - ink, - start=start, - *args, - **kwargs, - ) - except TypeError: - mask = font.getmask(text) - if stroke_offset: - coord = coord[0] + stroke_offset[0], coord[1] + stroke_offset[1] - if mode == "RGBA": - # font.getmask2(mode="RGBA") returns color in RGB bands and mask in A - # extract mask and set text alpha - color, mask = mask, mask.getband(3) - color.fillband(3, (ink >> 24) & 0xFF) - x, y = coord - self.im.paste(color, (x, y, x + mask.size[0], y + mask.size[1]), mask) - else: - self.draw.draw_bitmap(coord, mask, ink) - - ink = getink(fill) - if ink is not None: - stroke_ink = None - if stroke_width: - stroke_ink = getink(stroke_fill) if stroke_fill is not None else ink - - if stroke_ink is not None: - # Draw stroked text - draw_text(stroke_ink, stroke_width) - - # Draw normal text - draw_text(ink, 0) - else: - # Only draw normal text - draw_text(ink) - - def multiline_text( - self, - xy, - text, - fill=None, - font=None, - anchor=None, - spacing=4, - align="left", - direction=None, - features=None, - language=None, - stroke_width=0, - stroke_fill=None, - embedded_color=False, - ): - if direction == "ttb": - msg = "ttb direction is unsupported for multiline text" - raise ValueError(msg) - - if anchor is None: - anchor = "la" - elif len(anchor) != 2: - msg = "anchor must be a 2 character string" - raise ValueError(msg) - elif anchor[1] in "tb": - msg = "anchor not supported for multiline text" - raise ValueError(msg) - - widths = [] - max_width = 0 - lines = self._multiline_split(text) - line_spacing = self._multiline_spacing(font, spacing, stroke_width) - for line in lines: - line_width = self.textlength( - line, font, direction=direction, features=features, language=language - ) - widths.append(line_width) - max_width = max(max_width, line_width) - - top = xy[1] - if anchor[1] == "m": - top -= (len(lines) - 1) * line_spacing / 2.0 - elif anchor[1] == "d": - top -= (len(lines) - 1) * line_spacing - - for idx, line in enumerate(lines): - left = xy[0] - width_difference = max_width - widths[idx] - - # first align left by anchor - if anchor[0] == "m": - left -= width_difference / 2.0 - elif anchor[0] == "r": - left -= width_difference - - # then align by align parameter - if align == "left": - pass - elif align == "center": - left += width_difference / 2.0 - elif align == "right": - left += width_difference - else: - msg = 'align must be "left", "center" or "right"' - raise ValueError(msg) - - self.text( - (left, top), - line, - fill, - font, - anchor, - direction=direction, - features=features, - language=language, - stroke_width=stroke_width, - stroke_fill=stroke_fill, - embedded_color=embedded_color, - ) - top += line_spacing - - def textsize( - self, - text, - font=None, - spacing=4, - direction=None, - features=None, - language=None, - stroke_width=0, - ): - """Get the size of a given string, in pixels.""" - deprecate("textsize", 10, "textbbox or textlength") - if self._multiline_check(text): - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - return self.multiline_textsize( - text, - font, - spacing, - direction, - features, - language, - stroke_width, - ) - - if font is None: - font = self.getfont() - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - return font.getsize( - text, - direction, - features, - language, - stroke_width, - ) - - def multiline_textsize( - self, - text, - font=None, - spacing=4, - 
direction=None, - features=None, - language=None, - stroke_width=0, - ): - deprecate("multiline_textsize", 10, "multiline_textbbox") - max_width = 0 - lines = self._multiline_split(text) - line_spacing = self._multiline_spacing(font, spacing, stroke_width) - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - for line in lines: - line_width, line_height = self.textsize( - line, - font, - spacing, - direction, - features, - language, - stroke_width, - ) - max_width = max(max_width, line_width) - return max_width, len(lines) * line_spacing - spacing - - def textlength( - self, - text, - font=None, - direction=None, - features=None, - language=None, - embedded_color=False, - ): - """Get the length of a given string, in pixels with 1/64 precision.""" - if self._multiline_check(text): - msg = "can't measure length of multiline text" - raise ValueError(msg) - if embedded_color and self.mode not in ("RGB", "RGBA"): - msg = "Embedded color supported only in RGB and RGBA modes" - raise ValueError(msg) - - if font is None: - font = self.getfont() - mode = "RGBA" if embedded_color else self.fontmode - try: - return font.getlength(text, mode, direction, features, language) - except AttributeError: - deprecate("textlength support for fonts without getlength", 10) - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - size = self.textsize( - text, - font, - direction=direction, - features=features, - language=language, - ) - if direction == "ttb": - return size[1] - return size[0] - - def textbbox( - self, - xy, - text, - font=None, - anchor=None, - spacing=4, - align="left", - direction=None, - features=None, - language=None, - stroke_width=0, - embedded_color=False, - ): - """Get the bounding box of a given string, in pixels.""" - if embedded_color and self.mode not in ("RGB", "RGBA"): - msg = "Embedded color supported only in RGB and RGBA modes" - raise ValueError(msg) - - if self._multiline_check(text): - return self.multiline_textbbox( - xy, - text, - font, - anchor, - spacing, - align, - direction, - features, - language, - stroke_width, - embedded_color, - ) - - if font is None: - font = self.getfont() - mode = "RGBA" if embedded_color else self.fontmode - bbox = font.getbbox( - text, mode, direction, features, language, stroke_width, anchor - ) - return bbox[0] + xy[0], bbox[1] + xy[1], bbox[2] + xy[0], bbox[3] + xy[1] - - def multiline_textbbox( - self, - xy, - text, - font=None, - anchor=None, - spacing=4, - align="left", - direction=None, - features=None, - language=None, - stroke_width=0, - embedded_color=False, - ): - if direction == "ttb": - msg = "ttb direction is unsupported for multiline text" - raise ValueError(msg) - - if anchor is None: - anchor = "la" - elif len(anchor) != 2: - msg = "anchor must be a 2 character string" - raise ValueError(msg) - elif anchor[1] in "tb": - msg = "anchor not supported for multiline text" - raise ValueError(msg) - - widths = [] - max_width = 0 - lines = self._multiline_split(text) - line_spacing = self._multiline_spacing(font, spacing, stroke_width) - for line in lines: - line_width = self.textlength( - line, - font, - direction=direction, - features=features, - language=language, - embedded_color=embedded_color, - ) - widths.append(line_width) - max_width = max(max_width, line_width) - - top = xy[1] - if anchor[1] == "m": - top -= (len(lines) - 1) * line_spacing / 2.0 - elif anchor[1] == "d": - top -= (len(lines) - 1) * line_spacing - - bbox = None - - for 
idx, line in enumerate(lines): - left = xy[0] - width_difference = max_width - widths[idx] - - # first align left by anchor - if anchor[0] == "m": - left -= width_difference / 2.0 - elif anchor[0] == "r": - left -= width_difference - - # then align by align parameter - if align == "left": - pass - elif align == "center": - left += width_difference / 2.0 - elif align == "right": - left += width_difference - else: - msg = 'align must be "left", "center" or "right"' - raise ValueError(msg) - - bbox_line = self.textbbox( - (left, top), - line, - font, - anchor, - direction=direction, - features=features, - language=language, - stroke_width=stroke_width, - embedded_color=embedded_color, - ) - if bbox is None: - bbox = bbox_line - else: - bbox = ( - min(bbox[0], bbox_line[0]), - min(bbox[1], bbox_line[1]), - max(bbox[2], bbox_line[2]), - max(bbox[3], bbox_line[3]), - ) - - top += line_spacing - - if bbox is None: - return xy[0], xy[1], xy[0], xy[1] - return bbox - - -def Draw(im, mode=None): - """ - A simple 2D drawing interface for PIL images. - - :param im: The image to draw in. - :param mode: Optional mode to use for color values. For RGB - images, this argument can be RGB or RGBA (to blend the - drawing into the image). For all other modes, this argument - must be the same as the image mode. If omitted, the mode - defaults to the mode of the image. - """ - try: - return im.getdraw(mode) - except AttributeError: - return ImageDraw(im, mode) - - -# experimental access to the outline API -try: - Outline = Image.core.outline -except AttributeError: - Outline = None - - -def getdraw(im=None, hints=None): - """ - (Experimental) A more advanced 2D drawing interface for PIL images, - based on the WCK interface. - - :param im: The image to draw in. - :param hints: An optional list of hints. - :returns: A (drawing context, drawing resource factory) tuple. - """ - # FIXME: this needs more work! - # FIXME: come up with a better 'hints' scheme. - handler = None - if not hints or "nicest" in hints: - try: - from . import _imagingagg as handler - except ImportError: - pass - if handler is None: - from . import ImageDraw2 as handler - if im: - im = handler.Draw(im) - return im, handler - - -def floodfill(image, xy, value, border=None, thresh=0): - """ - (experimental) Fills a bounded region with a given color. - - :param image: Target image. - :param xy: Seed position (a 2-item coordinate tuple). See - :ref:`coordinate-system`. - :param value: Fill color. - :param border: Optional border value. If given, the region consists of - pixels with a color different from the border color. If not given, - the region consists of pixels having the same color as the seed - pixel. - :param thresh: Optional threshold value which specifies a maximum - tolerable difference of a pixel value from the 'background' in - order for it to be replaced. Useful for filling regions of - non-homogeneous, but similar, colors. - """ - # based on an implementation by Eric S. 
Raymond - # amended by yo1995 @20180806 - pixel = image.load() - x, y = xy - try: - background = pixel[x, y] - if _color_diff(value, background) <= thresh: - return # seed point already has fill color - pixel[x, y] = value - except (ValueError, IndexError): - return # seed point outside image - edge = {(x, y)} - # use a set to keep record of current and previous edge pixels - # to reduce memory consumption - full_edge = set() - while edge: - new_edge = set() - for x, y in edge: # 4 adjacent method - for s, t in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)): - # If already processed, or if a coordinate is negative, skip - if (s, t) in full_edge or s < 0 or t < 0: - continue - try: - p = pixel[s, t] - except (ValueError, IndexError): - pass - else: - full_edge.add((s, t)) - if border is None: - fill = _color_diff(p, background) <= thresh - else: - fill = p != value and p != border - if fill: - pixel[s, t] = value - new_edge.add((s, t)) - full_edge = edge # discard pixels processed - edge = new_edge - - -def _compute_regular_polygon_vertices(bounding_circle, n_sides, rotation): - """ - Generate a list of vertices for a 2D regular polygon. - - :param bounding_circle: The bounding circle is a tuple defined - by a point and radius. The polygon is inscribed in this circle. - (e.g. ``bounding_circle=(x, y, r)`` or ``((x, y), r)``) - :param n_sides: Number of sides - (e.g. ``n_sides=3`` for a triangle, ``6`` for a hexagon) - :param rotation: Apply an arbitrary rotation to the polygon - (e.g. ``rotation=90``, applies a 90 degree rotation) - :return: List of regular polygon vertices - (e.g. ``[(25, 50), (50, 50), (50, 25), (25, 25)]``) - - How are the vertices computed? - 1. Compute the following variables - - theta: Angle between the apothem & the nearest polygon vertex - - side_length: Length of each polygon edge - - centroid: Center of bounding circle (1st, 2nd elements of bounding_circle) - - polygon_radius: Polygon radius (last element of bounding_circle) - - angles: Location of each polygon vertex in polar grid - (e.g. A square with 0 degree rotation => [225.0, 315.0, 45.0, 135.0]) - - 2. For each angle in angles, get the polygon vertex at that angle - The vertex is computed using the equation below. - X= xcos(φ) + ysin(φ) - Y= −xsin(φ) + ycos(φ) - - Note: - φ = angle in degrees - x = 0 - y = polygon_radius - - The formula above assumes rotation around the origin. - In our case, we are rotating around the centroid. - To account for this, we use the formula below - X = xcos(φ) + ysin(φ) + centroid_x - Y = −xsin(φ) + ycos(φ) + centroid_y - """ - # 1. Error Handling - # 1.1 Check `n_sides` has an appropriate value - if not isinstance(n_sides, int): - msg = "n_sides should be an int" - raise TypeError(msg) - if n_sides < 3: - msg = "n_sides should be an int > 2" - raise ValueError(msg) - - # 1.2 Check `bounding_circle` has an appropriate value - if not isinstance(bounding_circle, (list, tuple)): - msg = "bounding_circle should be a tuple" - raise TypeError(msg) - - if len(bounding_circle) == 3: - *centroid, polygon_radius = bounding_circle - elif len(bounding_circle) == 2: - centroid, polygon_radius = bounding_circle - else: - msg = ( - "bounding_circle should contain 2D coordinates " - "and a radius (e.g. 
(x, y, r) or ((x, y), r) )" - ) - raise ValueError(msg) - - if not all(isinstance(i, (int, float)) for i in (*centroid, polygon_radius)): - msg = "bounding_circle should only contain numeric data" - raise ValueError(msg) - - if not len(centroid) == 2: - msg = "bounding_circle centre should contain 2D coordinates (e.g. (x, y))" - raise ValueError(msg) - - if polygon_radius <= 0: - msg = "bounding_circle radius should be > 0" - raise ValueError(msg) - - # 1.3 Check `rotation` has an appropriate value - if not isinstance(rotation, (int, float)): - msg = "rotation should be an int or float" - raise ValueError(msg) - - # 2. Define Helper Functions - def _apply_rotation(point, degrees, centroid): - return ( - round( - point[0] * math.cos(math.radians(360 - degrees)) - - point[1] * math.sin(math.radians(360 - degrees)) - + centroid[0], - 2, - ), - round( - point[1] * math.cos(math.radians(360 - degrees)) - + point[0] * math.sin(math.radians(360 - degrees)) - + centroid[1], - 2, - ), - ) - - def _compute_polygon_vertex(centroid, polygon_radius, angle): - start_point = [polygon_radius, 0] - return _apply_rotation(start_point, angle, centroid) - - def _get_angles(n_sides, rotation): - angles = [] - degrees = 360 / n_sides - # Start with the bottom left polygon vertex - current_angle = (270 - 0.5 * degrees) + rotation - for _ in range(0, n_sides): - angles.append(current_angle) - current_angle += degrees - if current_angle > 360: - current_angle -= 360 - return angles - - # 3. Variable Declarations - angles = _get_angles(n_sides, rotation) - - # 4. Compute Vertices - return [ - _compute_polygon_vertex(centroid, polygon_radius, angle) for angle in angles - ] - - -def _color_diff(color1, color2): - """ - Uses 1-norm distance to calculate difference between two values. - """ - if isinstance(color2, tuple): - return sum(abs(color1[i] - color2[i]) for i in range(0, len(color2))) - else: - return abs(color1 - color2) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/certifi/core.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/certifi/core.py deleted file mode 100644 index de028981b97e1fcc8ef4ab2c817cc8731b9c8738..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/certifi/core.py +++ /dev/null @@ -1,108 +0,0 @@ -""" -certifi.py -~~~~~~~~~~ - -This module returns the installation location of cacert.pem or its contents. -""" -import sys - - -if sys.version_info >= (3, 11): - - from importlib.resources import as_file, files - - _CACERT_CTX = None - _CACERT_PATH = None - - def where() -> str: - # This is slightly terrible, but we want to delay extracting the file - # in cases where we're inside of a zipimport situation until someone - # actually calls where(), but we don't want to re-extract the file - # on every call of where(), so we'll do it once then store it in a - # global variable. - global _CACERT_CTX - global _CACERT_PATH - if _CACERT_PATH is None: - # This is slightly janky, the importlib.resources API wants you to - # manage the cleanup of this file, so it doesn't actually return a - # path, it returns a context manager that will give you the path - # when you enter it and will do any cleanup when you leave it. In - # the common case of not needing a temporary file, it will just - # return the file system location and the __exit__() is a no-op. 
- # - # We also have to hold onto the actual context manager, because - # it will do the cleanup whenever it gets garbage collected, so - # we will also store that at the global level as well. - _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem")) - _CACERT_PATH = str(_CACERT_CTX.__enter__()) - - return _CACERT_PATH - - def contents() -> str: - return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii") - -elif sys.version_info >= (3, 7): - - from importlib.resources import path as get_path, read_text - - _CACERT_CTX = None - _CACERT_PATH = None - - def where() -> str: - # This is slightly terrible, but we want to delay extracting the - # file in cases where we're inside of a zipimport situation until - # someone actually calls where(), but we don't want to re-extract - # the file on every call of where(), so we'll do it once then store - # it in a global variable. - global _CACERT_CTX - global _CACERT_PATH - if _CACERT_PATH is None: - # This is slightly janky, the importlib.resources API wants you - # to manage the cleanup of this file, so it doesn't actually - # return a path, it returns a context manager that will give - # you the path when you enter it and will do any cleanup when - # you leave it. In the common case of not needing a temporary - # file, it will just return the file system location and the - # __exit__() is a no-op. - # - # We also have to hold onto the actual context manager, because - # it will do the cleanup whenever it gets garbage collected, so - # we will also store that at the global level as well. - _CACERT_CTX = get_path("certifi", "cacert.pem") - _CACERT_PATH = str(_CACERT_CTX.__enter__()) - - return _CACERT_PATH - - def contents() -> str: - return read_text("certifi", "cacert.pem", encoding="ascii") - -else: - import os - import types - from typing import Union - - Package = Union[types.ModuleType, str] - Resource = Union[str, "os.PathLike"] - - # This fallback will work for Python versions prior to 3.7 that lack the - # importlib.resources module but relies on the existing `where` function - # so won't address issues with environments like PyOxidizer that don't set - # __file__ on modules. - def read_text( - package: Package, - resource: Resource, - encoding: str = 'utf-8', - errors: str = 'strict' - ) -> str: - with open(where(), encoding=encoding) as data: - return data.read() - - # If we don't have importlib.resources, then we will just do the old logic - # of assuming we're on the filesystem and munge the path directly. 
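A minimal sketch of how the where() helper defined in this module is typically consumed, for example to build a TLS context (the URL is illustrative; ssl.create_default_context and urllib.request.urlopen are standard-library APIs):

import ssl
import urllib.request
import certifi

# point the default TLS context at certifi's bundled CA file
ctx = ssl.create_default_context(cafile=certifi.where())
with urllib.request.urlopen('https://example.com', context=ctx) as resp:
    print(resp.status)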
- def where() -> str: - f = os.path.dirname(__file__) - - return os.path.join(f, "cacert.pem") - - def contents() -> str: - return read_text("certifi", "cacert.pem", encoding="ascii") diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/middleware/cors.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/middleware/cors.py deleted file mode 100644 index 8dfaad0dbb3ff5300cccb2023748cd30f54bc920..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/middleware/cors.py +++ /dev/null @@ -1 +0,0 @@ -from starlette.middleware.cors import CORSMiddleware as CORSMiddleware # noqa diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/misc/vector.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/misc/vector.py deleted file mode 100644 index 81c1484188a46b567d8921d925f8a4700f65066f..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/misc/vector.py +++ /dev/null @@ -1,143 +0,0 @@ -from numbers import Number -import math -import operator -import warnings - - -__all__ = ["Vector"] - - -class Vector(tuple): - - """A math-like vector. - - Represents an n-dimensional numeric vector. ``Vector`` objects support - vector addition and subtraction, scalar multiplication and division, - negation, rounding, and comparison tests. - """ - - __slots__ = () - - def __new__(cls, values, keep=False): - if keep is not False: - warnings.warn( - "the 'keep' argument has been deprecated", - DeprecationWarning, - ) - if type(values) == Vector: - # No need to create a new object - return values - return super().__new__(cls, values) - - def __repr__(self): - return f"{self.__class__.__name__}({super().__repr__()})" - - def _vectorOp(self, other, op): - if isinstance(other, Vector): - assert len(self) == len(other) - return self.__class__(op(a, b) for a, b in zip(self, other)) - if isinstance(other, Number): - return self.__class__(op(v, other) for v in self) - raise NotImplementedError() - - def _scalarOp(self, other, op): - if isinstance(other, Number): - return self.__class__(op(v, other) for v in self) - raise NotImplementedError() - - def _unaryOp(self, op): - return self.__class__(op(v) for v in self) - - def __add__(self, other): - return self._vectorOp(other, operator.add) - - __radd__ = __add__ - - def __sub__(self, other): - return self._vectorOp(other, operator.sub) - - def __rsub__(self, other): - return self._vectorOp(other, _operator_rsub) - - def __mul__(self, other): - return self._scalarOp(other, operator.mul) - - __rmul__ = __mul__ - - def __truediv__(self, other): - return self._scalarOp(other, operator.truediv) - - def __rtruediv__(self, other): - return self._scalarOp(other, _operator_rtruediv) - - def __pos__(self): - return self._unaryOp(operator.pos) - - def __neg__(self): - return self._unaryOp(operator.neg) - - def __round__(self, *, round=round): - return self._unaryOp(round) - - def __eq__(self, other): - if isinstance(other, list): - # bw compat Vector([1, 2, 3]) == [1, 2, 3] - other = tuple(other) - return super().__eq__(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __bool__(self): - return any(self) - - __nonzero__ = __bool__ - - def __abs__(self): - return math.sqrt(sum(x * x for x in self)) - - def length(self): - """Return the length of the vector. 
Equivalent to abs(vector).""" - return abs(self) - - def normalized(self): - """Return the normalized vector of the vector.""" - return self / abs(self) - - def dot(self, other): - """Performs vector dot product, returning the sum of - ``a[0] * b[0], a[1] * b[1], ...``""" - assert len(self) == len(other) - return sum(a * b for a, b in zip(self, other)) - - # Deprecated methods/properties - - def toInt(self): - warnings.warn( - "the 'toInt' method has been deprecated, use round(vector) instead", - DeprecationWarning, - ) - return self.__round__() - - @property - def values(self): - warnings.warn( - "the 'values' attribute has been deprecated, use " - "the vector object itself instead", - DeprecationWarning, - ) - return list(self) - - @values.setter - def values(self, values): - raise AttributeError( - "can't set attribute, the 'values' attribute has been deprecated", - ) - - -def _operator_rsub(a, b): - return operator.sub(b, a) - - -def _operator_rtruediv(a, b): - return operator.truediv(b, a) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b64f4ef5.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b64f4ef5.js deleted file mode 100644 index 47cc34c075325deb306500f78dbad44e89df6af7..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b64f4ef5.js +++ /dev/null @@ -1,2 +0,0 @@ -import{E as W,C as Y,L as d}from"./index-0c011c1e.js";import{s as n,t as r,L as R,i as Z,d as a,f as X,a as y,b as f}from"./index-90411bc1.js";import"./index-7c0e54a6.js";import"./Blocks-61158678.js";import"./Button-661a0701.js";import"./BlockLabel-95be8dd1.js";import"./Empty-96265974.js";/* empty css */import"./Copy-c4997e4e.js";import"./Download-e5de98da.js";const l=1,w=189,S=190,b=191,T=192,U=193,m=194,V=22,g=23,h=47,G=48,c=53,u=54,_=55,j=57,E=58,k=59,z=60,v=61,H=63,N=230,A=71,F=255,K=121,C=142,D=143,M=146,i=10,s=13,t=32,o=9,q=35,L=40,B=46,J=new Set([g,h,G,F,H,K,u,_,N,z,v,E,k,A,C,D,M]),OO=new W((O,$)=>{if(O.next<0)O.acceptToken(m);else if(!(O.next!=i&&O.next!=s))if($.context.depth<0)O.acceptToken(T,1);else{O.advance();let Q=0;for(;O.next==t||O.next==o;)O.advance(),Q++;let P=O.next==i||O.next==s||O.next==q;O.acceptToken(P?U:b,-Q)}},{contextual:!0,fallback:!0}),$O=new W((O,$)=>{let Q=$.context.depth;if(Q<0)return;let P=O.peek(-1);if((P==i||P==s)&&$.context.depth>=0){let e=0,x=0;for(;;){if(O.next==t)e++;else if(O.next==o)e+=8-e%8;else break;O.advance(),x++}e!=Q&&O.next!=i&&O.next!=s&&O.next!=q&&(e{for(let $=0;$<5;$++){if(O.next!="print".charCodeAt($))return;O.advance()}if(!/\w/.test(String.fromCharCode(O.next)))for(let $=0;;$++){let Q=O.peek($);if(!(Q==t||Q==o)){Q!=L&&Q!=B&&Q!=i&&Q!=s&&Q!=q&&O.acceptToken(l);return}}}),iO=n({'async "*" "**" FormatConversion FormatSpec':r.modifier,"for while if elif else try except finally return raise break continue with pass assert await yield match case":r.controlKeyword,"in not and or is del":r.operatorKeyword,"from def class global nonlocal lambda":r.definitionKeyword,import:r.moduleKeyword,"with as 
print":r.keyword,Boolean:r.bool,None:r.null,VariableName:r.variableName,"CallExpression/VariableName":r.function(r.variableName),"FunctionDefinition/VariableName":r.function(r.definition(r.variableName)),"ClassDefinition/VariableName":r.definition(r.className),PropertyName:r.propertyName,"CallExpression/MemberExpression/PropertyName":r.function(r.propertyName),Comment:r.lineComment,Number:r.number,String:r.string,FormatString:r.special(r.string),UpdateOp:r.updateOperator,ArithOp:r.arithmeticOperator,BitOp:r.bitwiseOperator,CompareOp:r.compareOperator,AssignOp:r.definitionOperator,Ellipsis:r.punctuation,At:r.meta,"( )":r.paren,"[ ]":r.squareBracket,"{ }":r.brace,".":r.derefOperator,", ;":r.separator}),sO={__proto__:null,await:40,or:50,and:52,in:56,not:58,is:60,if:66,else:68,lambda:72,yield:90,from:92,async:98,for:100,None:152,True:154,False:154,del:168,pass:172,break:176,continue:180,return:184,raise:192,import:196,as:198,global:202,nonlocal:204,assert:208,elif:218,while:222,try:228,except:230,finally:232,with:236,def:240,class:250,match:261,case:267},oO=d.deserialize({version:14,states:"!L`O`Q$IXOOO%fQ$I[O'#G|OOQ$IS'#Cm'#CmOOQ$IS'#Cn'#CnO'UQ$IWO'#ClO(wQ$I[O'#G{OOQ$IS'#G|'#G|OOQ$IS'#DS'#DSOOQ$IS'#G{'#G{O)eQ$IWO'#CsO)uQ$IWO'#DdO*VQ$IWO'#DhOOQ$IS'#Ds'#DsO*jO`O'#DsO*rOpO'#DsO*zO!bO'#DtO+VO#tO'#DtO+bO&jO'#DtO+mO,UO'#DtO-oQ$I[O'#GmOOQ$IS'#Gm'#GmO'UQ$IWO'#GlO/RQ$I[O'#GlOOQ$IS'#E]'#E]O/jQ$IWO'#E^OOQ$IS'#Gk'#GkO/tQ$IWO'#GjOOQ$IV'#Gj'#GjO0PQ$IWO'#FPOOQ$IS'#GX'#GXO0UQ$IWO'#FOOOQ$IV'#Hx'#HxOOQ$IV'#Gi'#GiOOQ$IT'#Fh'#FhQ`Q$IXOOO'UQ$IWO'#CoO0dQ$IWO'#C{O0kQ$IWO'#DPO0yQ$IWO'#HQO1ZQ$I[O'#EQO'UQ$IWO'#EROOQ$IS'#ET'#ETOOQ$IS'#EV'#EVOOQ$IS'#EX'#EXO1oQ$IWO'#EZO2VQ$IWO'#E_O0PQ$IWO'#EaO2jQ$I[O'#EaO0PQ$IWO'#EdO/jQ$IWO'#EgO/jQ$IWO'#EkO/jQ$IWO'#EnO2uQ$IWO'#EpO2|Q$IWO'#EuO3XQ$IWO'#EqO/jQ$IWO'#EuO0PQ$IWO'#EwO0PQ$IWO'#E|O3^Q$IWO'#FROOQ$IS'#Cc'#CcOOQ$IS'#Cd'#CdOOQ$IS'#Ce'#CeOOQ$IS'#Cf'#CfOOQ$IS'#Cg'#CgOOQ$IS'#Ch'#ChOOQ$IS'#Cj'#CjO'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O3eQ$IWO'#DmOOQ$IS,5:W,5:WO3xQ$IWO'#H[OOQ$IS,5:Z,5:ZO4VQ%1`O,5:ZO4[Q$I[O,59WO0dQ$IWO,59`O0dQ$IWO,59`O0dQ$IWO,59`O6zQ$IWO,59`O7PQ$IWO,59`O7WQ$IWO,59hO7_Q$IWO'#G{O8eQ$IWO'#GzOOQ$IS'#Gz'#GzOOQ$IS'#DY'#DYO8|Q$IWO,59_O'UQ$IWO,59_O9[Q$IWO,59_O9aQ$IWO,5:PO'UQ$IWO,5:POOQ$IS,5:O,5:OO9oQ$IWO,5:OO9tQ$IWO,5:VO'UQ$IWO,5:VO'UQ$IWO,5:TOOQ$IS,5:S,5:SO:VQ$IWO,5:SO:[Q$IWO,5:UOOOO'#Fp'#FpO:aO`O,5:_OOQ$IS,5:_,5:_OOOO'#Fq'#FqO:iOpO,5:_O:qQ$IWO'#DuOOOO'#Fr'#FrO;RO!bO,5:`OOQ$IS,5:`,5:`OOOO'#Fu'#FuO;^O#tO,5:`OOOO'#Fv'#FvO;iO&jO,5:`OOOO'#Fw'#FwO;tO,UO,5:`OOQ$IS'#Fx'#FxOqQ$I[O,5=WO?[Q%GlO,5=WO?{Q$I[O,5=WOOQ$IS,5:x,5:xO@dQ$IXO'#GQOAsQ$IWO,5;TOOQ$IV,5=U,5=UOBOQ$I[O'#HtOBgQ$IWO,5;kOOQ$IS-E:V-E:VOOQ$IV,5;j,5;jO3SQ$IWO'#EwOOQ$IT-E9f-E9fOBoQ$I[O,59ZODvQ$I[O,59gOEaQ$IWO'#G}OElQ$IWO'#G}O0PQ$IWO'#G}OEwQ$IWO'#DROFPQ$IWO,59kOFUQ$IWO'#HRO'UQ$IWO'#HRO/jQ$IWO,5=lOOQ$IS,5=l,5=lO/jQ$IWO'#D|OOQ$IS'#D}'#D}OFsQ$IWO'#FzOGTQ$IWO,58zOGTQ$IWO,58zO)hQ$IWO,5:jOGcQ$I[O'#HTOOQ$IS,5:m,5:mOOQ$IS,5:u,5:uOGvQ$IWO,5:yOHXQ$IWO,5:{OOQ$IS'#F}'#F}OHgQ$I[O,5:{OHuQ$IWO,5:{OHzQ$IWO'#HwOOQ$IS,5;O,5;OOIYQ$IWO'#HsOOQ$IS,5;R,5;RO3XQ$IWO,5;VO3XQ$IWO,5;YOIkQ$I[O'#HyO'UQ$IWO'#HyOIuQ$IWO,5;[O2uQ$IWO,5;[O/jQ$IWO,5;aO0PQ$IWO,5;cOIzQ$IXO'#ElOKTQ$IZO,5;]ONiQ$IWO'#HzO3XQ$IWO,5;aONtQ$IWO,5;cONyQ$IWO,5;hO! 
RQ$I[O,5;mO'UQ$IWO,5;mO!#uQ$I[O1G.hO!#|Q$I[O1G.hO!&mQ$I[O1G.hO!&wQ$I[O1G.hO!)bQ$I[O1G.hO!)uQ$I[O1G.hO!*YQ$IWO'#HZO!*hQ$I[O'#GmO/jQ$IWO'#HZO!*rQ$IWO'#HYOOQ$IS,5:X,5:XO!*zQ$IWO,5:XO!+PQ$IWO'#H]O!+[Q$IWO'#H]O!+oQ$IWO,5=vOOQ$IS'#Dq'#DqOOQ$IS1G/u1G/uOOQ$IS1G.z1G.zO!,oQ$I[O1G.zO!,vQ$I[O1G.zO0dQ$IWO1G.zO!-cQ$IWO1G/SOOQ$IS'#DX'#DXO/jQ$IWO,59rOOQ$IS1G.y1G.yO!-jQ$IWO1G/cO!-zQ$IWO1G/cO!.SQ$IWO1G/dO'UQ$IWO'#HSO!.XQ$IWO'#HSO!.^Q$I[O1G.yO!.nQ$IWO,59gO!/tQ$IWO,5=rO!0UQ$IWO,5=rO!0^Q$IWO1G/kO!0cQ$I[O1G/kOOQ$IS1G/j1G/jO!0sQ$IWO,5=mO!1jQ$IWO,5=mO/jQ$IWO1G/oO!2XQ$IWO1G/qO!2^Q$I[O1G/qO!2nQ$I[O1G/oOOQ$IS1G/n1G/nOOQ$IS1G/p1G/pOOOO-E9n-E9nOOQ$IS1G/y1G/yOOOO-E9o-E9oO!3OQ$IWO'#HhO/jQ$IWO'#HhO!3^Q$IWO,5:aOOOO-E9p-E9pOOQ$IS1G/z1G/zOOOO-E9s-E9sOOOO-E9t-E9tOOOO-E9u-E9uOOQ$IS-E9v-E9vO!3iQ%GlO1G2rO!4YQ$I[O1G2rO'UQ$IWO,5`OOQ$IS1G1V1G1VO!5YQ$IWO1G1VOOQ$IS'#DT'#DTO/jQ$IWO,5=iOOQ$IS,5=i,5=iO!5_Q$IWO'#FiO!5jQ$IWO,59mO!5rQ$IWO1G/VO!5|Q$I[O,5=mOOQ$IS1G3W1G3WOOQ$IS,5:h,5:hO!6mQ$IWO'#GlOOQ$IS,5cO!8oQ$IWO,5>cO!8}Q$IWO,5>_O!9eQ$IWO,5>_O!9vQ$IZO1G0qO!=XQ$IZO1G0tO!@gQ$IWO,5>eO!@qQ$IWO,5>eO!@yQ$I[O,5>eO/jQ$IWO1G0vO!ATQ$IWO1G0vO3XQ$IWO1G0{ONtQ$IWO1G0}OOQ$IV,5;W,5;WO!AYQ$IYO,5;WO!A_Q$IZO1G0wO!DsQ$IWO'#GUO3XQ$IWO1G0wO3XQ$IWO1G0wO!EQQ$IWO,5>fO!E_Q$IWO,5>fO0PQ$IWO,5>fOOQ$IV1G0{1G0{O!EgQ$IWO'#EyO!ExQ%1`O1G0}OOQ$IV1G1S1G1SO3XQ$IWO1G1SO!FQQ$IWO'#FTOOQ$IV1G1X1G1XO! RQ$I[O1G1XOOQ$IS,5=u,5=uOOQ$IS'#Dn'#DnO/jQ$IWO,5=uO!FVQ$IWO,5=tO!FjQ$IWO,5=tOOQ$IS1G/s1G/sO!FrQ$IWO,5=wO!GSQ$IWO,5=wO!G[Q$IWO,5=wO!GoQ$IWO,5=wO!HPQ$IWO,5=wOOQ$IS1G3b1G3bOOQ$IS7+$f7+$fO!5rQ$IWO7+$nO!IrQ$IWO1G.zO!IyQ$IWO1G.zOOQ$IS1G/^1G/^OOQ$IS,5SO!NaQ$IWO,5>SO!NaQ$IWO,5>SO!NoO!LQO'#DwO!NzOSO'#HiOOOO1G/{1G/{O# PQ$IWO1G/{O# XQ%GlO7+(^O# xQ$I[O1G2PP#!cQ$IWO'#FyOOQ$IS,5T,5>TOOOO7+%g7+%gO#8UQ$IWO1G2rO#8oQ$IWO1G2rP'UQ$IWO'#FlO/jQ$IWO<bO#9cQ$IWO,5>bO0PQ$IWO,5>bO#9tQ$IWO,5>aOOQ$IS<hO#CeQ$IWO,5>hOOQ$IS,5>h,5>hO#CpQ$IWO,5>gO#DRQ$IWO,5>gOOQ$IS1G1P1G1POOQ$IS,5;g,5;gO#DZQ$IWO1G1ZP#D`Q$IWO'#FnO#DpQ$IWO1G1uO#ETQ$IWO1G1uO#EeQ$IWO1G1uP#EpQ$IWO'#FoO#E}Q$IWO7+(}O#F_Q$IWO7+(}O#F_Q$IWO7+(}O#FgQ$IWO7+(}O#FwQ$IWO7+(tO7WQ$IWO7+(tOOQ$ISAN>TAN>TO#GbQ$IWO<aAN>aO/jQ$IWO1G1sO#GrQ$I[O1G1sP#G|Q$IWO'#FmOOQ$IS1G1y1G1yP#HZQ$IWO'#FsO#HhQ$IWO7+)YOOOO-E9r-E9rO#IOQ$IWO7+(^OOQ$ISAN?VAN?VO#IiQ$IWO,5jO$,bQ$IWO,5>jO0PQ$IWO,5;vO$,sQ$IWO,5;zO$,xQ$IWO,5;zO#NzQ$IWO'#IQO$,}Q$IWO'#IQO$-SQ$IWO,5;{OOQ$IS,5;|,5;|O'UQ$IWO'#FgOOQ$IU1G1[1G1[O3XQ$IWO1G1[OOQ$ISAN@gAN@gO$-XQ$IWOG27oO$-iQ$IWO,59{OOQ$IS1G3[1G3[OOQ$IS,5lO#NzQ$IWO,5>lOOQ$IS1G1g1G1gO$0YQ$I[O,5mO$0hQ$IWO,5>mOOQ$IS1G1j1G1jOOQ$IS7+&y7+&yP#NzQ$IWO'#G_O$0pQ$IWO1G4WO$0zQ$IWO1G4WO$1SQ$IWO1G4WOOQ$IS7+%R7+%RO$1bQ$IWO1G1kO$1pQ$I[O'#FWO$1wQ$IWO,5m'PP>pP>vByFcPFw'PPPPF{GR&wP&w&wP&wP&wP&wP&wP&w&w&wP&wPP&wPP&wPGXPG`GfPG`PG`G`PPPG`PIePInItIzIePG`JQPG`PJXJ_PJcJwKfLPJcJcLVLdJcJcJcJcLxMOMRMWMZMaMgMsNVN]NgNm! Z! a! g! m! w! 
}!!T!!Z!!a!!g!!y!#T!#Z!#a!#g!#q!#w!#}!$T!$Z!$e!$k!$u!${!%U!%[!%k!%s!%}!&UPPPPPPPPP!&[!&d!&m!&w!'SPPPPPPPPPPPP!+r!,[!0j!3vPP!4O!4^!4g!5]!5S!5f!5l!5o!5r!5u!5}!6nPPPPPPPPPP!6q!6tPPPPPPPPP!6z!7W!7d!7j!7s!7v!7|!8S!8Y!8]P!8e!8n!9j!9m]iOr#n$n)c+c'udOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/x}!dP#j#w$Y$h$t%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!P!eP#j#w$Y$h$t$v%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!R!fP#j#w$Y$h$t$v$w%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!T!gP#j#w$Y$h$t$v$w$x%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!V!hP#j#w$Y$h$t$v$w$x$y%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!X!iP#j#w$Y$h$t$v$w$x$y$z%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!]!iP!o#j#w$Y$h$t$v$w$x$y$z${%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m'uSOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/x&ZUOXYZhrtv|}!R!S!T!X!j!l!p!q!r!t!u#^#k#n$Q$S$U$X$l$n%O%T%[%_%a%h%m%o%y&R&`&d&o&p&w'O'V'Y'g'h'k'm'n'r'y(R(X(_(b(i(k(s)S)V)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*p*q*x*{+S+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.b.y/i/j/k/l/n/o/p/q/t/x%eWOXYZhrv|}!R!S!T!X!j!l#^#k#n$Q$S$U$X$l$n%O%T%_%a%h%m%o%y&R&`&d&o&p&w'O'V'Y'g'h'k'm'n'r'y(R(X(_(b(i(k(s)S)V)`)c)l)v*O*R*S*V*]*`*b*e*f*i*p*q*x*{+S+c+j+k+n+v+w+x+z+{,O,S,U,W,Y,Z,],o,q,x,{-n-o.b/o/p/qQ#}uQ.c-sR/u/w'ldOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/xW#ql!O!P$`W#yu&b-s/wQ$b!QQ$r!YQ$s!ZW$}!j'h*O+vS&a#z#{Q'R$mQ(l&ZQ(z&qU({&s(|(}U)O&u)P+RQ)n'[W)o'^+q,s-]S+p)p)qY,_*|,`-T-U-wQ,b+OQ,l+gQ,n+il-`,w-f-g-i.R.T.Y.p.u.z/P/[/a/dQ-v-SQ.Z-hQ.g-{Q.r.VU/V.{/Y/bX/]/Q/^/e/fR&`#yi!xXY!S!T%a%h'y(R)V*]*`*bR%_!wQ!|XQ%z#^Q&i$UR&l$XT-r-O.y![!kP!o#j#w$Y$h$t$v$w$x$y$z${%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/mQ&^#rR'a$sR'g$}Q%W!nR.e-y'tcOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/xS#hc#i!P-d,w-f-g-h-i-{.R.T.Y.p.u.z.{/P/Q/Y/[/^/a/b/d/e/f'tcOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/xT#hc#iS#__#`S#b`#cS#da#eS#fb#gT*t(e*uT(f%z(hQ$WwR+o)oX$Uw$V$W&kZkOr$n)c+cXoOr)c+cQ$o!WQ&y$fQ&z$gQ']$qQ'`$sQ)a'QQ)g'VQ)i'WQ)j'XQ)w'_Q)y'aQ+V)VQ+X)WQ+Y)XQ+^)_S+`)b)xQ+d)eQ+e)fQ+f)hQ,d+UQ,e+WQ,g+_Q,h+aQ,m+hQ-W,fQ-Y,kQ-Z,lQ-x-XQ._-lR.x.`WoOr)c+cR#tnQ'_$rR)b'RQ+n)oR,q+oQ)x'_R+a)bZmOnr)c+cQ'c$tR){'dT,u+u,vu-k,w-f-g-i-{.R.T.Y.p.u.z.{/P/Y/[/a/b/dt-k,w-f-g-i-{.R.T.Y.p.u.z.{/P/Y/[/a/b/dQ.Z-hX/]/Q/^/e/f!P-c,w-f-g-h-i-{.R.T.Y.p.u.z.{/P/Q/Y/[/^/a/b/d/e/fQ.O-bR.l.Pg.R-e.S.h.o.t/S/U/W/c/g/hu-j,w-f-g-i-{.R.T.Y.p.u.z.{/P/Y/[/a/b/dX-|-`-j.g/VR.i-{V/X.{/Y/bR.`-lQrOR#vrQ&c#|R(q&cS%n#R$OS(Y%n(]T(]%q&eQ%b!
zQ%i!}W'z%b%i(P(TQ(P%fR(T%kQ&n$YR(w&nQ(`%rQ*g(ZT*m(`*gQ'i%PR*P'iS'l%S%TY*T'l*U+|,|-pU*U'm'n'oU+|*V*W*XS,|+},OR-p,}Q#Y]R%u#YQ#]^R%w#]Q#`_R%{#`Q(c%xS*r(c*sR*s(dQ*u(eR,[*uQ#c`R%}#cQ#eaR&O#eQ#gbR&P#gQ#icR&Q#iQ#lfQ&S#jW&V#l&S(t*yQ(t&hR*y/mQ$VwS&j$V&kR&k$WQ&x$dR)T&xQ&[#qR(m&[Q$`!PR&r$`Q*}({S,a*}-VR-V,bQ&v$bR)Q&vQ#ojR&X#oQ+c)cR,i+cQ)U&yR+T)UQ&|$hS)]&|)^R)^&}Q'U$oR)d'UQ'Z$pS)m'Z+lR+l)nQ+r)sR,t+rWnOr)c+cR#snQ,v+uR-^,vd.S-e.h.o.t/S/U/W/c/g/hR.n.SU-z-`.g/VR.f-zQ/R.tS/_/R/`R/`/SS.|.h.iR/Z.|Q.U-eR.q.USqOrT+b)c+cWpOr)c+cR'S$nYjOr$n)c+cR&W#n[wOr#n$n)c+cR&i$U&YPOXYZhrtv|}!R!S!T!X!j!l!p!q!r!t!u#^#k#n$Q$S$U$X$l$n%O%T%[%_%a%h%m%o%y&R&`&d&o&p&w'O'V'Y'g'h'k'm'n'r'y(R(X(_(b(i(k(s)S)V)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*p*q*x*{+S+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.b.y/i/j/k/l/n/o/p/q/t/xQ!oSQ#jeQ#wsU$Yx%d'}S$h!U$kQ$t![Q$v!dQ$w!eQ$x!fQ$y!gQ$z!hQ${!iQ%f!{Q%k#OQ%q#SQ%r#TQ&e$PQ&}$iQ'd$uQ(j&TU(u&m(v*zW)Y&{)[+[+]Q*Z'wQ*d(WQ+Z)ZQ,V*lQ.w.^R/m/rQ!zXQ!}YQ$f!SQ$g!T^'v%a%h'y(R*]*`*bR+W)V[fOr#n$n)c+ch!wXY!S!T%a%h'y(R)V*]*`*bQ#RZQ#mhS$Ov|Q$]}W$d!R$X'O)`S$p!X$lW$|!j'h*O+vQ%S!lQ%x#^`&U#k&R(i(k(s*x,]/qQ&f$QQ&g$SQ&h$UQ'e%OQ'o%TQ'u%_W(V%m(X*e*iQ(Z%oQ(d%yQ(o&`S(r&d/oQ(x&oQ(y&pU)R&w)S+SQ)h'VY)k'Y)l+j+k,oQ)|'g^*Q'k*S+z+{,{-o.bQ*W'mQ*X'nS*Y'r/pW*k(_*f,S,WW*o(b*q,Y,ZQ+t)vQ+y*RQ+}*VQ,X*pQ,^*{Q,p+nQ,y+wQ,z+xQ,},OQ-R,UQ-[,qQ-m,xR.a-nhTOr#k#n$n&R&d'r(i(k)c+c$z!vXYZhv|}!R!S!T!X!j!l#^$Q$S$U$X$l%O%T%_%a%h%m%o%y&`&o&p&w'O'V'Y'g'h'k'm'n'y(R(X(_(b(s)S)V)`)l)v*O*R*S*V*]*`*b*e*f*i*p*q*x*{+S+j+k+n+v+w+x+z+{,O,S,U,W,Y,Z,],o,q,x,{-n-o.b/o/p/qQ#xtW%X!p!t/j/tQ%Y!qQ%Z!rQ%]!uQ%g/iS'q%[/nQ's/kQ't/lQ,P*^Q-Q,QS-q-O.yR/v/xU#|u-s/wR(p&b[gOr#n$n)c+cX!yX#^$U$XQ#WZQ$RvR$[|Q%c!zQ%j!}Q%p#RQ'e$|Q(Q%fQ(U%kQ(^%qQ(a%rQ*h(ZQ-P,PQ-u-QR.d-tQ$ZxQ'|%dR*_'}Q-t-OR/T.yR#QYR#VZR%R!jQ%P!jV)}'h*O+v!]!mP!o#j#w$Y$h$t$v$w$x$y$z${%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/mR%U!lR%z#^Q(g%zR*w(hQ$e!RQ&l$XQ)_'OR+_)`Q#rlQ$^!OQ$a!PR&t$`Q(z&sR+Q(}Q(z&sQ+P(|R+Q(}R$c!QXpOr)c+cQ$j!UR'P$kQ$q!XR'Q$lR)u'^Q)s'^V,r+q,s-]Q-l,wQ.W-fR.X-gU-e,w-f-gQ.]-iQ.h-{Q.m.RU.o.T.p/PQ.t.YQ/S.uQ/U.zU/W.{/Y/bQ/c/[Q/g/aR/h/dR.[-hR.j-{",nodeNames:"⚠ print Comment Script AssignStatement * BinaryExpression BitOp BitOp BitOp BitOp ArithOp ArithOp @ ArithOp ** UnaryExpression ArithOp BitOp AwaitExpression await ) ( ParenthesizedExpression BinaryExpression or and CompareOp in not is UnaryExpression ConditionalExpression if else LambdaExpression lambda ParamList VariableName AssignOp , : NamedExpression AssignOp YieldExpression yield from TupleExpression ComprehensionExpression async for LambdaExpression ] [ ArrayExpression ArrayComprehensionExpression } { DictionaryExpression DictionaryComprehensionExpression SetExpression SetComprehensionExpression CallExpression ArgList AssignOp MemberExpression . 
PropertyName Number String FormatString FormatReplacement FormatConversion FormatSpec ContinuedString Ellipsis None Boolean TypeDef AssignOp UpdateStatement UpdateOp ExpressionStatement DeleteStatement del PassStatement pass BreakStatement break ContinueStatement continue ReturnStatement return YieldStatement PrintStatement RaiseStatement raise ImportStatement import as ScopeStatement global nonlocal AssertStatement assert StatementGroup ; IfStatement Body elif WhileStatement while ForStatement TryStatement try except finally WithStatement with FunctionDefinition def ParamList AssignOp TypeDef ClassDefinition class DecoratedStatement Decorator At MatchStatement match MatchBody MatchClause case CapturePattern LiteralPattern ArithOp ArithOp AsPattern OrPattern LogicOp AttributePattern SequencePattern MappingPattern StarPattern ClassPattern PatternArgList KeywordPattern KeywordPattern Guard",maxTerm:267,context:PO,nodeProps:[["group",-14,4,80,82,83,85,87,89,91,93,94,95,97,100,103,"Statement Statement",-22,6,16,19,23,38,47,48,54,55,58,59,60,61,62,65,68,69,70,74,75,76,77,"Expression",-10,105,107,110,112,113,117,119,124,126,129,"Statement",-9,134,135,138,139,141,142,143,144,145,"Pattern"],["openedBy",21,"(",52,"[",56,"{"],["closedBy",22,")",53,"]",57,"}"]],propSources:[iO],skippedNodes:[0,2],repeatNodeCount:38,tokenData:"&JdMgR!^OX$}XY!&]Y[$}[]!&]]p$}pq!&]qr!(grs!,^st!IYtu$}uv$5[vw$7nwx$8zxy%'vyz%(|z{%*S{|%,r|}%.O}!O%/U!O!P%1k!P!Q%UZ&^7[&WW&f#tOr(}rs)}sw(}wx>wx#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(}:Y?QX&^7[&WW&f#tOr>wrs?ms#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>w:Y?rX&^7[Or>wrs@_s#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>w:Y@dX&^7[Or>wrs-}s#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>w:YAUT&^7[O#o>w#o#p6g#p#q>w#q#r6g#r~>w`x#O!`x#O!gZ&WW&R,XOY!wZ]!Ad]^>w^r!Adrs!Bhs#O!Ad#O#P!C[#P#o!Ad#o#p!9f#p#q!Ad#q#r!7x#r~!AdEc!BoX&^7[&R,XOr>wrs@_s#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>wEc!CaT&^7[O#o!Ad#o#p!7x#p#q!Ad#q#r!7x#r~!AdGZ!CuT&^7[O#o!-l#o#p!DU#p#q!-l#q#r!DU#r~!-l0}!De]&TS&WW&R,X&Z`&d!b&f#tOY!DUYZAyZ]!DU]^Ay^r!DUrs!E^sw!DUwx!5tx#O!DU#O#P!FU#P#o!DU#o#p!F[#p~!DU0}!EiX&TS&R,X&Z`&d!bOrAyrsCiswAywx5Px#OAy#O#PEo#P#oAy#o#pEu#p~Ay0}!FXPO~!DU0}!Fe]&TS&WW&R,XOY!`x#O!`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p#0Y#p~#=d2P#=mZQ1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P~#=d2P#>gZQ1s&TSOY#=dYZ:{Z]#=d]^:{^r#=drs#?Ysw#=dwx#@Sx#O#=d#O#P#Av#P~#=d2P#?aZQ1s&TSOY#=dYZ:{Z]#=d]^:{^r#=drs#,zsw#=dwx#@Sx#O#=d#O#P#Av#P~#=d2P#@ZZQ1s&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@|x#O#=d#O#P#Av#P~#=d2P#ATZQ1s&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#9bx#O#=d#O#P#Av#P~#=d2P#A{TQ1sOY#=dYZ:{Z]#=d]^:{^~#=dLe#Bg_Q1s&^7[&WW&f#tOY!NdYZ(}Z]!Nd]^(}^r!Ndrs# 
rsw!Ndwx#Cfx#O!Nd#O#P#/f#P#o!Nd#o#p#wZ]#Cf]^>w^r#Cfrs#Djs#O#Cf#O#P#Fj#P#o#Cf#o#p#8h#p#q#Cf#q#r#5h#r~#CfJ}#Dq]Q1s&^7[OY#CfYZ>wZ]#Cf]^>w^r#Cfrs#Ejs#O#Cf#O#P#Fj#P#o#Cf#o#p#8h#p#q#Cf#q#r#5h#r~#CfJ}#Eq]Q1s&^7[OY#CfYZ>wZ]#Cf]^>w^r#Cfrs#'[s#O#Cf#O#P#Fj#P#o#Cf#o#p#8h#p#q#Cf#q#r#5h#r~#CfJ}#FqXQ1s&^7[OY#CfYZ>wZ]#Cf]^>w^#o#Cf#o#p#5h#p#q#Cf#q#r#5h#r~#CfLu#GeXQ1s&^7[OY!KxYZ'PZ]!Kx]^'P^#o!Kx#o#p#HQ#p#q!Kx#q#r#HQ#r~!Kx6i#Ha]Q1s&TS&WW&Z`&d!b&f#tOY#HQYZAyZ]#HQ]^Ay^r#HQrs#IYsw#HQwx#3dx#O#HQ#O#P#Mn#P#o#HQ#o#p#NS#p~#HQ6i#Ie]Q1s&TS&Z`&d!bOY#HQYZAyZ]#HQ]^Ay^r#HQrs#J^sw#HQwx#3dx#O#HQ#O#P#Mn#P#o#HQ#o#p#NS#p~#HQ6i#Ji]Q1s&TS&Z`&d!bOY#HQYZAyZ]#HQ]^Ay^r#HQrs#Kbsw#HQwx#3dx#O#HQ#O#P#Mn#P#o#HQ#o#p#NS#p~#HQ3k#KmZQ1s&TS&Z`&d!bOY#KbYZD_Z]#Kb]^D_^w#Kbwx#)|x#O#Kb#O#P#L`#P#o#Kb#o#p#Lt#p~#Kb3k#LeTQ1sOY#KbYZD_Z]#Kb]^D_^~#Kb3k#L{ZQ1s&TSOY#,zYZ1OZ]#,z]^1O^w#,zwx#-nx#O#,z#O#P#/Q#P#o#,z#o#p#Kb#p~#,z6i#MsTQ1sOY#HQYZAyZ]#HQ]^Ay^~#HQ6i#N]]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p#HQ#p~#=dLu$ c_Q1s&^7[&TS&Z`&d!bOY!KxYZ'PZ]!Kx]^'P^r!Kxrs$!bsw!Kxwx!MYx#O!Kx#O#P#G^#P#o!Kx#o#p#NS#p#q!Kx#q#r#HQ#r~!KxIw$!o]Q1s&^7[&TS&Z`&d!bOY$!bYZGgZ]$!b]^Gg^w$!bwx#%[x#O$!b#O#P$#h#P#o$!b#o#p#Lt#p#q$!b#q#r#Kb#r~$!bIw$#oXQ1s&^7[OY$!bYZGgZ]$!b]^Gg^#o$!b#o#p#Kb#p#q$!b#q#r#Kb#r~$!bMV$$i_Q1s&^7[&WW&ap&f#tOY$%hYZIqZ]$%h]^Iq^r$%hrs# rsw$%hwx$.px#O$%h#O#P$&x#P#o$%h#o#p$-n#p#q$%h#q#r$'l#r~$%hMV$%y_Q1s&^7[&TS&WW&ap&d!b&f#tOY$%hYZIqZ]$%h]^Iq^r$%hrs# rsw$%hwx$$[x#O$%h#O#P$&x#P#o$%h#o#p$-n#p#q$%h#q#r$'l#r~$%hMV$'PXQ1s&^7[OY$%hYZIqZ]$%h]^Iq^#o$%h#o#p$'l#p#q$%h#q#r$'l#r~$%h6y$'{]Q1s&TS&WW&ap&d!b&f#tOY$'lYZKXZ]$'l]^KX^r$'lrs#1`sw$'lwx$(tx#O$'l#O#P$-Y#P#o$'l#o#p$-n#p~$'l6y$)P]Q1s&WW&ap&f#tOY$'lYZKXZ]$'l]^KX^r$'lrs#1`sw$'lwx$)xx#O$'l#O#P$-Y#P#o$'l#o#p$-n#p~$'l6y$*T]Q1s&WW&ap&f#tOY$'lYZKXZ]$'l]^KX^r$'lrs#1`sw$'lwx$*|x#O$'l#O#P$-Y#P#o$'l#o#p$-n#p~$'l5c$+XZQ1s&WW&ap&f#tOY$*|YZMmZ]$*|]^Mm^r$*|rs#6ds#O$*|#O#P$+z#P#o$*|#o#p$,`#p~$*|5c$,PTQ1sOY$*|YZMmZ]$*|]^Mm^~$*|5c$,gZQ1s&WWOY#9bYZ8tZ]#9b]^8t^r#9brs#:Us#O#9b#O#P#;h#P#o#9b#o#p$*|#p~#9b6y$-_TQ1sOY$'lYZKXZ]$'l]^KX^~$'l6y$-w]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p$'l#p~#=dMV$.}_Q1s&^7[&WW&ap&f#tOY$%hYZIqZ]$%h]^Iq^r$%hrs# rsw$%hwx$/|x#O$%h#O#P$&x#P#o$%h#o#p$-n#p#q$%h#q#r$'l#r~$%hKo$0Z]Q1s&^7[&WW&ap&f#tOY$/|YZ!!uZ]$/|]^!!u^r$/|rs#Djs#O$/|#O#P$1S#P#o$/|#o#p$,`#p#q$/|#q#r$*|#r~$/|Ko$1ZXQ1s&^7[OY$/|YZ!!uZ]$/|]^!!u^#o$/|#o#p$*|#p#q$/|#q#r$*|#r~$/|Mg$1}XQ1s&^7[OY!IYYZ$}Z]!IY]^$}^#o!IY#o#p$2j#p#q!IY#q#r$2j#r~!IY7Z$2{]Q1s&TS&WW&Z`&ap&d!b&f#tOY$2jYZ!$gZ]$2j]^!$g^r$2jrs#IYsw$2jwx$(tx#O$2j#O#P$3t#P#o$2j#o#p$4Y#p~$2j7Z$3yTQ1sOY$2jYZ!$gZ]$2j]^!$g^~$2j7Z$4c]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p$2j#p~#=dGz$5o]%jQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz$6{Z!s,W&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz$8R]%dQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{$9Z_&_`&^7[&WW&R,X&ap&f#tOY$:YYZIqZ]$:Y]^Iq^r$:Yrs$;jsw$:Ywx%%zx#O$:Y#O#P%!^#P#o$:Y#o#p%$x#p#q$:Y#q#r%!r#r~$:YGk$:k_&^7[&TS&WW&R,X&ap&d!b&f#tOY$:YYZIqZ]$:Y]^Iq^r$:Yrs$;jsw$:Ywx% 
^x#O$:Y#O#P%!^#P#o$:Y#o#p%$x#p#q$:Y#q#r%!r#r~$:YFy$;u_&^7[&TS&R,X&d!bOY$Sx#O$Sx#O$_Z&^7[&WW&R,X&f#tOr(}rs)}sw(}wx={x#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(}Fy$?VT&^7[O#o$Sx#O$T!Q!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz%>h]%kQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%?tu!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!O$}!O!P%BX!P!Q$}!Q![%Cc![!d$}!d!e%Ee!e!g$}!g!h%7Z!h!l$}!l!m%;k!m!q$}!q!r%H_!r!z$}!z!{%KR!{#O$}#O#P!$R#P#R$}#R#S%Cc#S#U$}#U#V%Ee#V#X$}#X#Y%7Z#Y#^$}#^#_%;k#_#c$}#c#d%H_#d#l$}#l#m%KR#m#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Bj]&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%5_![#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Cvi!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!O$}!O!P%BX!P!Q$}!Q![%Cc![!g$}!g!h%7Z!h!l$}!l!m%;k!m#O$}#O#P!$R#P#R$}#R#S%Cc#S#X$}#X#Y%7Z#Y#^$}#^#_%;k#_#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Ev`&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!R%Fx!R!S%Fx!S#O$}#O#P!$R#P#R$}#R#S%Fx#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%G]`!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!R%Fx!R!S%Fx!S#O$}#O#P!$R#P#R$}#R#S%Fx#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Hp_&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!Y%Io!Y#O$}#O#P!$R#P#R$}#R#S%Io#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%JS_!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!Y%Io!Y#O$}#O#P!$R#P#R$}#R#S%Io#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Kdc&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%Lo![!c$}!c!i%Lo!i#O$}#O#P!$R#P#R$}#R#S%Lo#S#T$}#T#Z%Lo#Z#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%MSc!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%Lo![!c$}!c!i%Lo!i#O$}#O#P!$R#P#R$}#R#S%Lo#S#T$}#T#Z%Lo#Z#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Mg%Nr]y1s&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`& k!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}x!u!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#f&+n#f#g&>x#g#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nGZ&9gZ&^7[&TS&Z`&d!b&`,XOr'Prs&:Ysw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'PGZ&:eZ&^7[&TS&Z`&d!bOr'Prs&;Wsw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'PD]&;eX&^7[&TS&e,X&Z`&d!bOwGgwx,kx#OGg#O#PH_#P#oGg#o#pET#p#qGg#q#rD_#r~GgGk&<_Z&^7[&WW&ap&f#t&Y,XOrIqrs)}swIqwx&=Qx#OIq#O#PJs#P#oIq#o#p! T#p#qIq#q#rKX#r~IqGk&=]Z&^7[&WW&ap&f#tOrIqrs)}swIqwx&>Ox#OIq#O#PJs#P#oIq#o#p! 
T#p#qIq#q#rKX#r~IqFT&>]X&^7[&WW&c,X&ap&f#tOr!!urs?ms#O!!u#O#P!#m#P#o!!u#o#pNc#p#q!!u#q#rMm#r~!!uMg&?_c&^7[&TS&WW&Q&j&Z`&ap&d!b&f#t%m,XOr$}rs&9Ysw$}wx&x!i!t&+n!t!u&5j!u!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#U&+n#U#V&5j#V#Y&+n#Y#Z&>x#Z#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nG{&CXZ!V,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}sO[O]||-1}],tokenPrec:7282});function I(O,$){let Q=O.lineIndent($.from),P=O.lineAt(O.pos,-1),e=P.from+P.text.length;return!/\S/.test(P.text)&&O.node.toQ?null:Q+O.unit}const aO=R.define({name:"python",parser:oO.configure({props:[Z.add({Body:O=>{var $;return($=I(O,O.node))!==null&&$!==void 0?$:O.continue()},IfStatement:O=>/^\s*(else:|elif )/.test(O.textAfter)?O.baseIndent:O.continue(),TryStatement:O=>/^\s*(except |finally:|else:)/.test(O.textAfter)?O.baseIndent:O.continue(),"TupleExpression ComprehensionExpression ParamList ArgList ParenthesizedExpression":a({closing:")"}),"DictionaryExpression DictionaryComprehensionExpression SetExpression SetComprehensionExpression":a({closing:"}"}),"ArrayExpression ArrayComprehensionExpression":a({closing:"]"}),"String FormatString":()=>null,Script:O=>{if(O.pos+/\s*/.exec(O.textAfter)[0].length>=O.node.to){let $=null;for(let Q=O.node,P=Q.to;Q=Q.lastChild,!(!Q||Q.to!=P);)Q.type.name=="Body"&&($=Q);if($){let Q=I(O,$);if(Q!=null)return Q}}return O.continue()}}),X.add({"ArrayExpression DictionaryExpression SetExpression TupleExpression":y,Body:(O,$)=>({from:O.from+1,to:O.to-(O.to==$.doc.length?0:1)})})]}),languageData:{closeBrackets:{brackets:["(","[","{","'",'"',"'''",'"""'],stringPrefixes:["f","fr","rf","r","u","b","br","rb","F","FR","RF","R","U","B","BR","RB"]},commentTokens:{line:"#"},indentOnInput:/^\s*([\}\]\)]|else:|elif |except |finally:)$/}});function nO(){return new f(aO)}export{nO as python,aO as pythonLanguage}; -//# sourceMappingURL=index-b64f4ef5.js.map diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/importlib_resources/tests/test_custom.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/importlib_resources/tests/test_custom.py deleted file mode 100644 index e85ddd659e01bebb03b5df6a5ed4fb0070abd263..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/importlib_resources/tests/test_custom.py +++ /dev/null @@ -1,45 +0,0 @@ -import unittest -import contextlib -import pathlib - -import importlib_resources as resources -from ..abc import TraversableResources, ResourceReader -from . import util -from ._compat import os_helper - - -class SimpleLoader: - """ - A simple loader that only implements a resource reader. - """ - - def __init__(self, reader: ResourceReader): - self.reader = reader - - def get_resource_reader(self, package): - return self.reader - - -class MagicResources(TraversableResources): - """ - Magically returns the resources at path. 
- """ - - def __init__(self, path: pathlib.Path): - self.path = path - - def files(self): - return self.path - - -class CustomTraversableResourcesTests(unittest.TestCase): - def setUp(self): - self.fixtures = contextlib.ExitStack() - self.addCleanup(self.fixtures.close) - - def test_custom_loader(self): - temp_dir = self.fixtures.enter_context(os_helper.temp_dir()) - loader = SimpleLoader(MagicResources(temp_dir)) - pkg = util.create_package_from_loader(loader) - files = resources.files(pkg) - assert files is temp_dir diff --git a/spaces/latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5/app-txt2img.py b/spaces/latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5/app-txt2img.py deleted file mode 100644 index 7b0da7a522864414f0884a49ae7368724e60b849..0000000000000000000000000000000000000000 --- a/spaces/latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5/app-txt2img.py +++ /dev/null @@ -1,255 +0,0 @@ -import asyncio -import json -import logging -import traceback -from pydantic import BaseModel - -from fastapi import FastAPI, WebSocket, HTTPException, WebSocketDisconnect -from fastapi.middleware.cors import CORSMiddleware -from fastapi.responses import ( - StreamingResponse, - JSONResponse, - HTMLResponse, - FileResponse, -) - -from diffusers import DiffusionPipeline, AutoencoderTiny -from compel import Compel -import torch - -try: - import intel_extension_for_pytorch as ipex -except: - pass -from PIL import Image -import numpy as np -import gradio as gr -import io -import uuid -import os -import time -import psutil - - -MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0)) -TIMEOUT = float(os.environ.get("TIMEOUT", 0)) -SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None) -TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None) - -WIDTH = 768 -HEIGHT = 768 -# disable tiny autoencoder for better quality speed tradeoff -USE_TINY_AUTOENCODER = False - -# check if MPS is available OSX only M1/M2/M3 chips -mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available() -xpu_available = hasattr(torch, "xpu") and torch.xpu.is_available() -device = torch.device( - "cuda" if torch.cuda.is_available() else "xpu" if xpu_available else "cpu" -) -torch_device = device -# change to torch.float16 to save GPU memory -torch_dtype = torch.float32 - -print(f"TIMEOUT: {TIMEOUT}") -print(f"SAFETY_CHECKER: {SAFETY_CHECKER}") -print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}") -print(f"device: {device}") - -if mps_available: - device = torch.device("mps") - torch_device = "cpu" - torch_dtype = torch.float32 - -if SAFETY_CHECKER == "True": - pipe = DiffusionPipeline.from_pretrained( - "SimianLuo/LCM_Dreamshaper_v7", - ) -else: - pipe = DiffusionPipeline.from_pretrained( - "SimianLuo/LCM_Dreamshaper_v7", - safety_checker=None, - ) -if USE_TINY_AUTOENCODER: - pipe.vae = AutoencoderTiny.from_pretrained( - "madebyollin/taesd", torch_dtype=torch_dtype, use_safetensors=True - ) -pipe.set_progress_bar_config(disable=True) -pipe.to(device=torch_device, dtype=torch_dtype).to(device) -pipe.unet.to(memory_format=torch.channels_last) - -# check if computer has less than 64GB of RAM using sys or os -if psutil.virtual_memory().total < 64 * 1024**3: - pipe.enable_attention_slicing() - -if TORCH_COMPILE: - pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) - pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True) - - pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0) - -compel_proc = Compel( - tokenizer=pipe.tokenizer, - 
text_encoder=pipe.text_encoder, - truncate_long_prompts=False, -) -user_queue_map = {} - - -class InputParams(BaseModel): - seed: int = 2159232 - prompt: str - guidance_scale: float = 8.0 - strength: float = 0.5 - steps: int = 4 - lcm_steps: int = 50 - width: int = WIDTH - height: int = HEIGHT - - -def predict(params: InputParams): - generator = torch.manual_seed(params.seed) - prompt_embeds = compel_proc(params.prompt) - # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps. - results = pipe( - prompt_embeds=prompt_embeds, - generator=generator, - num_inference_steps=params.steps, - guidance_scale=params.guidance_scale, - width=params.width, - height=params.height, - original_inference_steps=params.lcm_steps, - output_type="pil", - ) - nsfw_content_detected = ( - results.nsfw_content_detected[0] - if "nsfw_content_detected" in results - else False - ) - if nsfw_content_detected: - return None - return results.images[0] - - -app = FastAPI() -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - - -@app.websocket("/ws") -async def websocket_endpoint(websocket: WebSocket): - await websocket.accept() - if MAX_QUEUE_SIZE > 0 and len(user_queue_map) >= MAX_QUEUE_SIZE: - print("Server is full") - await websocket.send_json({"status": "error", "message": "Server is full"}) - await websocket.close() - return - - try: - uid = str(uuid.uuid4()) - print(f"New user connected: {uid}") - await websocket.send_json( - {"status": "success", "message": "Connected", "userId": uid} - ) - user_queue_map[uid] = { - "queue": asyncio.Queue(), - } - await websocket.send_json( - {"status": "start", "message": "Start Streaming", "userId": uid} - ) - await handle_websocket_data(websocket, uid) - except WebSocketDisconnect as e: - logging.error(f"WebSocket Error: {e}, {uid}") - traceback.print_exc() - finally: - print(f"User disconnected: {uid}") - queue_value = user_queue_map.pop(uid, None) - queue = queue_value.get("queue", None) - if queue: - while not queue.empty(): - try: - queue.get_nowait() - except asyncio.QueueEmpty: - continue - - -@app.get("/queue_size") -async def get_queue_size(): - queue_size = len(user_queue_map) - return JSONResponse({"queue_size": queue_size}) - - -@app.get("/stream/{user_id}") -async def stream(user_id: uuid.UUID): - uid = str(user_id) - try: - user_queue = user_queue_map[uid] - queue = user_queue["queue"] - - async def generate(): - while True: - params = await queue.get() - if params is None: - continue - - image = predict(params) - if image is None: - continue - frame_data = io.BytesIO() - image.save(frame_data, format="JPEG") - frame_data = frame_data.getvalue() - if frame_data is not None and len(frame_data) > 0: - yield b"--frame\r\nContent-Type: image/jpeg\r\n\r\n" + frame_data + b"\r\n" - - await asyncio.sleep(1.0 / 120.0) - - return StreamingResponse( - generate(), media_type="multipart/x-mixed-replace;boundary=frame" - ) - except Exception as e: - logging.error(f"Streaming Error: {e}, {user_queue_map}") - traceback.print_exc() - return HTTPException(status_code=404, detail="User not found") - - -async def handle_websocket_data(websocket: WebSocket, user_id: uuid.UUID): - uid = str(user_id) - user_queue = user_queue_map[uid] - queue = user_queue["queue"] - if not queue: - return HTTPException(status_code=404, detail="User not found") - last_time = time.time() - try: - while True: - params = await websocket.receive_json() - params = InputParams(**params) - 
while not queue.empty(): - try: - queue.get_nowait() - except asyncio.QueueEmpty: - continue - await queue.put(params) - if TIMEOUT > 0 and time.time() - last_time > TIMEOUT: - await websocket.send_json( - { - "status": "timeout", - "message": "Your session has ended", - "userId": uid, - } - ) - await websocket.close() - return - - except Exception as e: - logging.error(f"Error: {e}") - traceback.print_exc() - - -@app.get("/", response_class=HTMLResponse) -async def root(): - return FileResponse("./static/txt2img.html") diff --git a/spaces/lewisliuX123/wechatgpt3/docker/build.debian.sh b/spaces/lewisliuX123/wechatgpt3/docker/build.debian.sh deleted file mode 100644 index a5285f39813426a2d63eb01982229b23ec09dba2..0000000000000000000000000000000000000000 --- a/spaces/lewisliuX123/wechatgpt3/docker/build.debian.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -CHATGPT_ON_WECHAT_TAG=1.0.2 - -docker build -f Dockerfile.debian \ - --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \ - -t zhayujie/chatgpt-on-wechat . - -docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-debian \ No newline at end of file diff --git a/spaces/lewispons/GrammarGuru/src/models/tdidf.py b/spaces/lewispons/GrammarGuru/src/models/tdidf.py deleted file mode 100644 index 82eeb12f1785d1b1538f19e01f4d5c61a5bc173e..0000000000000000000000000000000000000000 --- a/spaces/lewispons/GrammarGuru/src/models/tdidf.py +++ /dev/null @@ -1,33 +0,0 @@ -import pandas as pd -from sklearn.pipeline import Pipeline -from sklearn.feature_selection import ColumnSelector -from sklearn.feature_extraction.text import TfidfVectorizer - -from utils.utilities import * -import sys -from pprint import pprint - -CONFIG_FILE_PATH = "/Users/luis.morales/Desktop/arxiv-paper-recommender/models/configs.yaml" -config = read_yaml_config(CONFIG_FILE_PATH) -pprint(config) - -@execution_time -def train_tfidf(): - df = pd.read_parquet("/Users/luis.morales/Desktop/arxiv-paper-recommender/data/processed/arxiv_papers.parquet.gzip") \ - .sample(500000) \ - .reset_index(drop=True) - - - vectorizer = TfidfVectorizer(**config["models"]["tfidf"]["tfidf_deffault"]) - pprint(config["models"]["tfidf"]["tfidf_deffault"]) - sys.exit() - - vectors = vectorizer.fit_transform(df['cleaned_abstracts']) - - tfidf_df = pd.DataFrame(vectors.toarray(), columns=[i for i in vectorizer.get_feature_names_out()]) - - - tfidf_df.to_parquet("/Users/luis.morales/Desktop/arxiv-paper-recommender/data/processed/reduced_arxiv_tfidf.parquet.gzip") - -train_tfidf() - diff --git a/spaces/lewiswu1209/MockingBird/encoder/data_objects/utterance.py b/spaces/lewiswu1209/MockingBird/encoder/data_objects/utterance.py deleted file mode 100644 index 0768c3420f422a7464f305b4c1fb6752c57ceda7..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/encoder/data_objects/utterance.py +++ /dev/null @@ -1,26 +0,0 @@ -import numpy as np - - -class Utterance: - def __init__(self, frames_fpath, wave_fpath): - self.frames_fpath = frames_fpath - self.wave_fpath = wave_fpath - - def get_frames(self): - return np.load(self.frames_fpath) - - def random_partial(self, n_frames): - """ - Crops the frames into a partial utterance of n_frames - - :param n_frames: The number of frames of the partial utterance - :return: the partial utterance frames and a tuple indicating the start and end of the - partial utterance in the complete utterance. 
- """ - frames = self.get_frames() - if frames.shape[0] == n_frames: - start = 0 - else: - start = np.random.randint(0, frames.shape[0] - n_frames) - end = start + n_frames - return frames[start:end], (start, end) \ No newline at end of file diff --git a/spaces/lhkhiem28/A-recognition-system/source/api.py b/spaces/lhkhiem28/A-recognition-system/source/api.py deleted file mode 100644 index cb0db7faea0a861339e2b304ebeeb1c6e933bf2b..0000000000000000000000000000000000000000 --- a/spaces/lhkhiem28/A-recognition-system/source/api.py +++ /dev/null @@ -1,48 +0,0 @@ -import os, sys -from libs import * - -class NER(): - def __init__(self, - ckp_dir, - ): - self.model = transformers.pipeline("token-classification", - ckp_dir, aggregation_strategy = "simple", - ) - - def ner_predict(self, - sentence, - ): - sentence = vitools.normalize_diacritics(sentence) - sentence = underthesea.word_tokenize(sentence, format = "text") - output = { - "PATIENT_ID":[], - "NAME":[], - "AGE":[], - "GENDER":[], - "JOB":[], - "LOCATION":[], - "ORGANIZATION":[], - "SYMPTOM_AND_DISEASE":[], - "TRANSPORTATION":[], - "DATE":[], - } - pred = self.model(sentence) - for entity in pred: - if entity["entity_group"] in output: - output[entity["entity_group"]].append(entity["word"]) - - for entity_group, entities in output.items(): - fixed_entities = [] - i = 0 - while i < len(entities): - if entities[i].endswith("@@"): - fixed_entity = entities[i][:-2] + entities[i + 1] - fixed_entities.append(fixed_entity.replace("_", " ")) - i += 2 - else: - fixed_entity = entities[i] - fixed_entities.append(fixed_entity.replace("_", " ")) - i += 1 - output[entity_group] = fixed_entities - - return output \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/HD Online Player (Race 3 3 Full REPACK Movie Download In Hd 720p).md b/spaces/lincquiQcaudo/Top-20-Diffusion/HD Online Player (Race 3 3 Full REPACK Movie Download In Hd 720p).md deleted file mode 100644 index 3eba2c017c6278068703b52570ea1b2c17d70368..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/HD Online Player (Race 3 3 Full REPACK Movie Download In Hd 720p).md +++ /dev/null @@ -1,6 +0,0 @@ -

        HD Online Player (Race 3 3 full movie download in hd 720p)


        Download File ✪✪✪ https://bytlly.com/2uGwR1



        - -Disclaimer:- Katmoviehd. com - Playing Dead – Hobbit man-child and his wiener dog Daily visitors: 534. ... 1 | H264 | MP4 | 720p | DVD | Bluray. in, Skymovies hd. ... 15-06-2018 · Race 3, 2018 Full Movie, Watch Online or Download HD Quality ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/lllqqq/so-vits-svc-models-pcr/app.py b/spaces/lllqqq/so-vits-svc-models-pcr/app.py deleted file mode 100644 index 0120cc47772d25900ee7a427538a957564357a54..0000000000000000000000000000000000000000 --- a/spaces/lllqqq/so-vits-svc-models-pcr/app.py +++ /dev/null @@ -1,375 +0,0 @@ -# -*- coding: utf-8 -*- -import traceback -import torch -from scipy.io import wavfile -import edge_tts -import subprocess -import gradio as gr -import gradio.processing_utils as gr_pu -import io -import os -import logging -import time -from pathlib import Path -import re -import json -import argparse - -import librosa -import matplotlib.pyplot as plt -import numpy as np -import soundfile - -from inference import infer_tool -from inference import slicer -from inference.infer_tool import Svc - -logging.getLogger('numba').setLevel(logging.WARNING) -chunks_dict = infer_tool.read_temp("inference/chunks_temp.json") - -logging.getLogger('numba').setLevel(logging.WARNING) -logging.getLogger('markdown_it').setLevel(logging.WARNING) -logging.getLogger('urllib3').setLevel(logging.WARNING) -logging.getLogger('matplotlib').setLevel(logging.WARNING) -logging.getLogger('multipart').setLevel(logging.WARNING) - -model = None -spk = None -debug = False - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def vc_fn(sid, input_audio, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold): - try: - if input_audio is None: - raise gr.Error("你需要上传音频") - if model is None: - raise gr.Error("你需要指定模型") - sampling_rate, audio = input_audio - # print(audio.shape,sampling_rate) - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - temp_path = "temp.wav" - soundfile.write(temp_path, audio, sampling_rate, format="wav") - _audio = model.slice_inference(temp_path, sid, vc_transform, slice_db, cluster_ratio, auto_f0, noise_scale, - pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold) - model.clear_empty() - os.remove(temp_path) - # 构建保存文件的路径,并保存到results文件夹内 - try: - timestamp = str(int(time.time())) - filename = sid + "_" + timestamp + ".wav" - # output_file = os.path.join("./results", filename) - # soundfile.write(output_file, _audio, model.target_sample, format="wav") - soundfile.write('/tmp/'+filename, _audio, - model.target_sample, format="wav") - # return f"推理成功,音频文件保存为results/{filename}", (model.target_sample, _audio) - return f"推理成功,音频文件保存为{filename}", (model.target_sample, _audio) - except Exception as e: - if debug: - traceback.print_exc() - return f"文件保存失败,请手动保存", (model.target_sample, _audio) - except Exception as e: - if debug: - traceback.print_exc() - raise 
gr.Error(e) - - -def tts_func(_text, _rate, _voice): - # 使用edge-tts把文字转成音频 - # voice = "zh-CN-XiaoyiNeural"#女性,较高音 - # voice = "zh-CN-YunxiNeural"#男性 - voice = "zh-CN-YunxiNeural" # 男性 - if (_voice == "女"): - voice = "zh-CN-XiaoyiNeural" - output_file = "/tmp/"+_text[0:10]+".wav" - # communicate = edge_tts.Communicate(_text, voice) - # await communicate.save(output_file) - if _rate >= 0: - ratestr = "+{:.0%}".format(_rate) - elif _rate < 0: - ratestr = "{:.0%}".format(_rate) # 减号自带 - - p = subprocess.Popen("edge-tts " + - " --text "+_text + - " --write-media "+output_file + - " --voice "+voice + - " --rate="+ratestr, shell=True, - stdout=subprocess.PIPE, - stdin=subprocess.PIPE) - p.wait() - return output_file - - -def text_clear(text): - return re.sub(r"[\n\,\(\) ]", "", text) - - -def vc_fn2(sid, input_audio, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, text2tts, tts_rate, tts_voice, f0_predictor, enhancer_adaptive_key, cr_threshold): - # 使用edge-tts把文字转成音频 - text2tts = text_clear(text2tts) - output_file = tts_func(text2tts, tts_rate, tts_voice) - - # 调整采样率 - sr2 = 44100 - wav, sr = librosa.load(output_file) - wav2 = librosa.resample(wav, orig_sr=sr, target_sr=sr2) - save_path2 = text2tts[0:10]+"_44k"+".wav" - wavfile.write(save_path2, sr2, - (wav2 * np.iinfo(np.int16).max).astype(np.int16) - ) - - # 读取音频 - sample_rate, data = gr_pu.audio_from_file(save_path2) - vc_input = (sample_rate, data) - - a, b = vc_fn(sid, vc_input, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, - pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold) - os.remove(output_file) - os.remove(save_path2) - return a, b - - -models_info = [ - { - "description": """ - 这个模型包含公主连结的161名角色。\n\n - Space采用CPU推理,速度极慢,建议下载模型本地GPU推理。\n\n - """, - "model_path": "./G_228800.pth", - "config_path": "./config.json", - } -] - -model_inferall = [] -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--share", action="store_true", - default=False, help="share gradio app") - # 一定要设置的部分 - parser.add_argument('-cl', '--clip', type=float, - default=0, help='音频强制切片,默认0为自动切片,单位为秒/s') - parser.add_argument('-n', '--clean_names', type=str, nargs='+', - default=["君の知らない物語-src.wav"], help='wav文件名列表,放在raw文件夹下') - parser.add_argument('-t', '--trans', type=int, nargs='+', - default=[0], help='音高调整,支持正负(半音)') - parser.add_argument('-s', '--spk_list', type=str, - nargs='+', default=['nen'], help='合成目标说话人名称') - - # 可选项部分 - parser.add_argument('-a', '--auto_predict_f0', action='store_true', - default=False, help='语音转换自动预测音高,转换歌声时不要打开这个会严重跑调') - parser.add_argument('-cm', '--cluster_model_path', type=str, - default="logs/44k/kmeans_10000.pt", help='聚类模型路径,如果没有训练聚类则随便填') - parser.add_argument('-cr', '--cluster_infer_ratio', type=float, - default=0, help='聚类方案占比,范围0-1,若没有训练聚类模型则默认0即可') - parser.add_argument('-lg', '--linear_gradient', type=float, default=0, - help='两段音频切片的交叉淡入长度,如果强制切片后出现人声不连贯可调整该数值,如果连贯建议采用默认值0,单位为秒') - parser.add_argument('-f0p', '--f0_predictor', type=str, default="pm", - help='选择F0预测器,可选择crepe,pm,dio,harvest,默认为pm(注意:crepe为原F0使用均值滤波器)') - parser.add_argument('-eh', '--enhance', action='store_true', default=False, - help='是否使用NSF_HIFIGAN增强器,该选项对部分训练集少的模型有一定的音质增强效果,但是对训练好的模型有反面效果,默认关闭') - parser.add_argument('-shd', '--shallow_diffusion', action='store_true', - default=False, help='是否使用浅层扩散,使用后可解决一部分电音问题,默认关闭,该选项打开时,NSF_HIFIGAN增强器将会被禁止') - - # 浅扩散设置 - parser.add_argument('-dm', '--diffusion_model_path', 
type=str, - default="logs/44k/diffusion/model_0.pt", help='扩散模型路径') - parser.add_argument('-dc', '--diffusion_config_path', type=str, - default="logs/44k/diffusion/config.yaml", help='扩散模型配置文件路径') - parser.add_argument('-ks', '--k_step', type=int, - default=100, help='扩散步数,越大越接近扩散模型的结果,默认100') - parser.add_argument('-od', '--only_diffusion', action='store_true', - default=False, help='纯扩散模式,该模式不会加载sovits模型,以扩散模型推理') - - # 不用动的部分 - parser.add_argument('-sd', '--slice_db', type=int, - default=-40, help='默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50') - parser.add_argument('-d', '--device', type=str, - default=None, help='推理设备,None则为自动选择cpu和gpu') - parser.add_argument('-ns', '--noice_scale', type=float, - default=0.4, help='噪音级别,会影响咬字和音质,较为玄学') - parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, - help='推理音频pad秒数,由于未知原因开头结尾会有异响,pad一小段静音段后就不会出现') - parser.add_argument('-wf', '--wav_format', type=str, - default='flac', help='音频输出格式') - parser.add_argument('-lgr', '--linear_gradient_retain', type=float, - default=0.75, help='自动音频切片后,需要舍弃每段切片的头尾。该参数设置交叉长度保留的比例,范围0-1,左开右闭') - parser.add_argument('-eak', '--enhancer_adaptive_key', - type=int, default=0, help='使增强器适应更高的音域(单位为半音数)|默认为0') - parser.add_argument('-ft', '--f0_filter_threshold', type=float, default=0.05, - help='F0过滤阈值,只有使用crepe时有效. 数值范围从0-1. 降低该值可减少跑调概率,但会增加哑音') - args = parser.parse_args() - categories = ["Princess Connect! Re:Dive"] - others = { - "PCR vits-fast-fineturning": "https://huggingface.co/spaces/FrankZxShen/vits-fast-finetuning-pcr", - } - for info in models_info: - config_path = info['config_path'] - model_path = info['model_path'] - description = info['description'] - clean_names = args.clean_names - trans = args.trans - spk_list = list(get_hparams_from_file(config_path).spk.keys()) - slice_db = args.slice_db - wav_format = args.wav_format - auto_predict_f0 = args.auto_predict_f0 - cluster_infer_ratio = args.cluster_infer_ratio - noice_scale = args.noice_scale - pad_seconds = args.pad_seconds - clip = args.clip - lg = args.linear_gradient - lgr = args.linear_gradient_retain - f0p = args.f0_predictor - enhance = args.enhance - enhancer_adaptive_key = args.enhancer_adaptive_key - cr_threshold = args.f0_filter_threshold - diffusion_model_path = args.diffusion_model_path - diffusion_config_path = args.diffusion_config_path - k_step = args.k_step - only_diffusion = args.only_diffusion - shallow_diffusion = args.shallow_diffusion - - model = Svc(model_path, config_path, args.device, args.cluster_model_path, enhance, - diffusion_model_path, diffusion_config_path, shallow_diffusion, only_diffusion) - - model_inferall.append((description, spk_list, model)) - - app = gr.Blocks() - with app: - gr.Markdown( - "#
        so-vits-svc-models-pcr\n" - "#
        Pay attention!!! Space uses CPU inferencing, which is extremely slow. It is recommended to download models.\n" - "#
        注意!!!Space采用CPU推理,速度极慢,建议下载模型使用本地GPU推理。\n" - "##
        Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n" - "##
        请不要生成会对个人以及组织造成侵害的内容\n\n" - ) - gr.Markdown("# Princess Connect! Re:Dive\n\n" - ) - with gr.Tabs(): - for category in categories: - with gr.TabItem(category): - for i, (description, speakers, model) in enumerate( - model_inferall): - gr.Markdown(description) - with gr.Row(): - with gr.Column(): - # textbox = gr.TextArea(label="Text", - # placeholder="Type your sentence here ", - # value="新たなキャラを解放できるようになったようですね。", elem_id=f"tts-input") - - gr.Markdown(value=""" - 推理设置 - """) - sid = gr.Dropdown( - choices=speakers, value=speakers[0], label='角色选择') - auto_f0 = gr.Checkbox( - label="自动f0预测,配合聚类模型f0预测效果更好,会导致变调功能失效(仅限转换语音,歌声勾选此项会究极跑调)", value=False) - f0_predictor = gr.Dropdown(label="选择F0预测器,可选择crepe,pm,dio,harvest,默认为pm(注意:crepe为原F0使用均值滤波器)", choices=[ - "pm", "dio", "harvest", "crepe"], value="pm") - vc_transform = gr.Number( - label="变调(整数,可以正负,半音数量,升高八度就是12)", value=0) - cluster_ratio = gr.Number( - label="聚类模型混合比例,0-1之间,0即不启用聚类。使用聚类模型能提升音色相似度,但会导致咬字下降(如果使用建议0.5左右)", value=0) - slice_db = gr.Number(label="切片阈值", value=-40) - noise_scale = gr.Number( - label="noise_scale 建议不要动,会影响音质,玄学参数", value=0.4) - with gr.Column(): - pad_seconds = gr.Number( - label="推理音频pad秒数,由于未知原因开头结尾会有异响,pad一小段静音段后就不会出现", value=0.5) - cl_num = gr.Number( - label="音频自动切片,0为不切片,单位为秒(s)", value=0) - lg_num = gr.Number( - label="两端音频切片的交叉淡入长度,如果自动切片后出现人声不连贯可调整该数值,如果连贯建议采用默认值0,注意,该设置会影响推理速度,单位为秒/s", value=0) - lgr_num = gr.Number( - label="自动音频切片后,需要舍弃每段切片的头尾。该参数设置交叉长度保留的比例,范围0-1,左开右闭", value=0.75) - enhancer_adaptive_key = gr.Number( - label="使增强器适应更高的音域(单位为半音数)|默认为0", value=0) - cr_threshold = gr.Number( - label="F0过滤阈值,只有启动crepe时有效. 数值范围从0-1. 降低该值可减少跑调概率,但会增加哑音", value=0.05) - with gr.Tabs(): - with gr.TabItem("音频转音频"): - vc_input3 = gr.Audio(label="选择音频") - vc_submit = gr.Button( - "音频转换", variant="primary") - with gr.TabItem("文字转音频"): - text2tts = gr.Textbox( - label="在此输入要转译的文字。注意,使用该功能建议打开F0预测,不然会很怪") - tts_rate = gr.Number(label="tts语速", value=0) - tts_voice = gr.Radio(label="性别", choices=[ - "男", "女"], value="男") - vc_submit2 = gr.Button( - "文字转换", variant="primary") - with gr.Row(): - with gr.Column(): - vc_output1 = gr.Textbox(label="Output Message") - with gr.Column(): - vc_output2 = gr.Audio( - label="Output Audio", interactive=False) - - vc_submit.click(vc_fn, [sid, vc_input3, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, - cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold], [vc_output1, vc_output2]) - vc_submit2.click(vc_fn2, [sid, vc_input3, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, - lg_num, lgr_num, text2tts, tts_rate, tts_voice, f0_predictor, enhancer_adaptive_key, cr_threshold], [vc_output1, vc_output2]) - # gr.Examples( - # examples=example, - # inputs=[textbox, char_dropdown, language_dropdown, - # duration_slider, symbol_input], - # outputs=[text_output, audio_output], - # fn=tts_fn - # ) - for category, link in others.items(): - with gr.TabItem(category): - gr.Markdown( - f''' -
        -

        Click to Go

        - - -
        - ''' - ) - - app.queue(concurrency_count=3).launch(show_api=False, share=args.share) diff --git a/spaces/logasja/Fawkes/README.md b/spaces/logasja/Fawkes/README.md deleted file mode 100644 index 785b3ef890246ecc8687ed97193b8e07d27d4c0c..0000000000000000000000000000000000000000 --- a/spaces/logasja/Fawkes/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fawkes -emoji: 🌖 -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.0.20 -app_file: app.py -pinned: false -license: gpl-3.0 -python_version: 3.8.13 ---- \ No newline at end of file diff --git a/spaces/lunarflu/HF-QA-Demo-3/data/stackoverflow_python_dataset.py b/spaces/lunarflu/HF-QA-Demo-3/data/stackoverflow_python_dataset.py deleted file mode 100644 index f29c8f1a4282f067bbd5c5a48db66e4d999d19f3..0000000000000000000000000000000000000000 --- a/spaces/lunarflu/HF-QA-Demo-3/data/stackoverflow_python_dataset.py +++ /dev/null @@ -1,55 +0,0 @@ -from datetime import datetime -from datasets import load_dataset -from bs4 import BeautifulSoup - - -def preprocess_dataset(): - """ - Preprocesses the 'koutch/stackoverflow_python' dataset. - - Returns: - datasets.arrow_dataset.Dataset: The preprocessed dataset. - """ - dataset = load_dataset('koutch/stackoverflow_python', split='train') - dataset = dataset.filter( - lambda example: - example['question_score'] > 100 and - example['answer_score'] > 5 and - datetime.strptime(example['answer_date'], '%Y-%m-%dT%H:%M:%SZ').year > 2010 - ) - - def html2text(example): - soup = BeautifulSoup(example, 'html.parser') - return ''.join(soup.findAll(string=True)) - - def transforms(example): - example['answer'] = html2text(example['answer_body']) - example['question'] = html2text(example['question_body']) - return example - - dataset = dataset.map(lambda example: transforms(example)) - dataset = dataset.remove_columns([ - 'question_score', 'question_date', 'question_id', - 'answer_date', 'answer_id', 'answer_score', 'tags', - 'question_body', 'answer_body' - ]) - return dataset - - -def show_info(dataset): - """ - Print information about the dataset. - - Args: - dataset (datasets.arrow_dataset.Dataset): The dataset. - """ - print(dataset.info, '\n') - print(f'dataset len: {len(dataset)}') - print(f"example question: {dataset[0]['question']}") - print(f"example answer: {dataset[0]['answer']}") - - -if __name__ == '__main__': - dataset = preprocess_dataset() - dataset.push_to_hub('KonradSzafer/stackoverflow_python_preprocessed', private=False) - show_info(dataset) diff --git a/spaces/ma-xu/LIVE/pybind11/.github/ISSUE_TEMPLATE/question.md b/spaces/ma-xu/LIVE/pybind11/.github/ISSUE_TEMPLATE/question.md deleted file mode 100644 index b199b6ee8ad446994aed54f67b0d1c22049d53c1..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/.github/ISSUE_TEMPLATE/question.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: Question -about: File an issue about unexplained behavior -title: "[QUESTION] " ---- - -If you have a question, please check the following first: - -1. Check if your question has already been answered in the [FAQ][] section. -2. Make sure you've read the [documentation][]. Your issue may be addressed there. -3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room][] -4. Search the [issue tracker][], including the closed issues, to see if your question has already been asked/answered. +1 or comment if it has been asked but has no answer. -5. 
If you have a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below. -6. Include a self-contained and minimal piece of code that illustrates your question. If that's not possible, try to make the description as clear as possible. - -[FAQ]: http://pybind11.readthedocs.io/en/latest/faq.html -[documentation]: https://pybind11.readthedocs.io -[issue tracker]: https://github.com/pybind/pybind11/issues -[Gitter chat room]: https://gitter.im/pybind/Lobby - -*After reading, remove this checklist.* diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/execute_with_dependencies.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/execute_with_dependencies.h deleted file mode 100644 index cb92b1ba2b372d8cba9be817aee2e2db48160dc0..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/detail/execute_with_dependencies.h +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Copyright 2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include - -#if THRUST_CPP_DIALECT >= 2011 - -#include -#include - -#include -#include - -namespace thrust -{ -namespace detail -{ - -struct capture_as_dependency_fn -{ - template - auto operator()(Dependency&& dependency) const - THRUST_DECLTYPE_RETURNS(capture_as_dependency(THRUST_FWD(dependency))) -}; - -// Default implementation: universal forwarding. -template -auto capture_as_dependency(Dependency&& dependency) -THRUST_DECLTYPE_RETURNS(THRUST_FWD(dependency)) - -template -auto capture_as_dependency(std::tuple& dependencies) -THRUST_DECLTYPE_RETURNS( - tuple_for_each(THRUST_FWD(dependencies), capture_as_dependency_fn{}) -) - -template class BaseSystem, typename... Dependencies> -struct execute_with_dependencies - : BaseSystem> -{ -private: - using super_t = BaseSystem>; - - std::tuple...> dependencies; - -public: - __host__ - execute_with_dependencies(super_t const &super, Dependencies && ...dependencies) - : super_t(super), dependencies(std::forward(dependencies)...) - { - } - - template - __host__ - execute_with_dependencies(super_t const &super, UDependencies && ...deps) - : super_t(super), dependencies(THRUST_FWD(deps)...) - { - } - - template - __host__ - execute_with_dependencies(UDependencies && ...deps) - : dependencies(THRUST_FWD(deps)...) - { - } - - template - __host__ - execute_with_dependencies(super_t const &super, std::tuple&& deps) - : super_t(super), dependencies(std::move(deps)) - { - } - - template - __host__ - execute_with_dependencies(std::tuple&& deps) - : dependencies(std::move(deps)) - { - } - - std::tuple...> - __host__ - extract_dependencies() - { - return std::move(dependencies); - } - - // Rebinding. - template - __host__ - execute_with_dependencies - rebind_after(UDependencies&& ...udependencies) const - { - return { capture_as_dependency(THRUST_FWD(udependencies))... }; - } - - // Rebinding. 
- template - __host__ - execute_with_dependencies - rebind_after(std::tuple& udependencies) const - { - return { capture_as_dependency(udependencies) }; - } - template - __host__ - execute_with_dependencies - rebind_after(std::tuple&& udependencies) const - { - return { capture_as_dependency(std::move(udependencies)) }; - } -}; - -template< - typename Allocator, - template class BaseSystem, - typename... Dependencies -> -struct execute_with_allocator_and_dependencies - : BaseSystem< - execute_with_allocator_and_dependencies< - Allocator, - BaseSystem, - Dependencies... - > - > -{ -private: - using super_t = BaseSystem< - execute_with_allocator_and_dependencies< - Allocator, - BaseSystem, - Dependencies... - > - >; - - std::tuple...> dependencies; - Allocator alloc; - -public: - template - __host__ - execute_with_allocator_and_dependencies(super_t const &super, Allocator a, UDependencies && ...deps) - : super_t(super), dependencies(THRUST_FWD(deps)...), alloc(a) - { - } - - template - __host__ - execute_with_allocator_and_dependencies(Allocator a, UDependencies && ...deps) - : dependencies(THRUST_FWD(deps)...), alloc(a) - { - } - - template - __host__ - execute_with_allocator_and_dependencies(super_t const &super, Allocator a, std::tuple&& deps) - : super_t(super), dependencies(std::move(deps)), alloc(a) - { - } - - template - __host__ - execute_with_allocator_and_dependencies(Allocator a, std::tuple&& deps) - : dependencies(std::move(deps)), alloc(a) - { - } - - std::tuple...> - __host__ - extract_dependencies() - { - return std::move(dependencies); - } - - __host__ - typename std::add_lvalue_reference::type - get_allocator() - { - return alloc; - } - - // Rebinding. - template - __host__ - execute_with_allocator_and_dependencies - rebind_after(UDependencies&& ...udependencies) const - { - return { alloc, capture_as_dependency(THRUST_FWD(udependencies))... }; - } - - // Rebinding. 
- template - __host__ - execute_with_allocator_and_dependencies - rebind_after(std::tuple& udependencies) const - { - return { alloc, capture_as_dependency(udependencies) }; - } - template - __host__ - execute_with_allocator_and_dependencies - rebind_after(std::tuple&& udependencies) const - { - return { alloc, capture_as_dependency(std::move(udependencies)) }; - } -}; - -template class BaseSystem, typename ...Dependencies> -__host__ -std::tuple...> -extract_dependencies(thrust::detail::execute_with_dependencies&& system) -{ - return std::move(system).extract_dependencies(); -} -template class BaseSystem, typename ...Dependencies> -__host__ -std::tuple...> -extract_dependencies(thrust::detail::execute_with_dependencies& system) -{ - return std::move(system).extract_dependencies(); -} - -template class BaseSystem, typename ...Dependencies> -__host__ -std::tuple...> -extract_dependencies(thrust::detail::execute_with_allocator_and_dependencies&& system) -{ - return std::move(system).extract_dependencies(); -} -template class BaseSystem, typename ...Dependencies> -__host__ -std::tuple...> -extract_dependencies(thrust::detail::execute_with_allocator_and_dependencies& system) -{ - return std::move(system).extract_dependencies(); -} - -template -__host__ -std::tuple<> -extract_dependencies(System &&) -{ - return std::tuple<>{}; -} - -} // end detail -} // end thrust - -#endif // THRUST_CPP_DIALECT >= 2011 - diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/data/transforms.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/data/transforms.py deleted file mode 100644 index d9bbb5fb7daef5edfb425fafb4d67d471b3001e6..0000000000000000000000000000000000000000 --- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/data/transforms.py +++ /dev/null @@ -1,179 +0,0 @@ -import cv2 -import random -import torch - - -def mod_crop(img, scale): - """Mod crop images, used during testing. - - Args: - img (ndarray): Input image. - scale (int): Scale factor. - - Returns: - ndarray: Result image. - """ - img = img.copy() - if img.ndim in (2, 3): - h, w = img.shape[0], img.shape[1] - h_remainder, w_remainder = h % scale, w % scale - img = img[:h - h_remainder, :w - w_remainder, ...] - else: - raise ValueError(f'Wrong img ndim: {img.ndim}.') - return img - - -def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None): - """Paired random crop. Support Numpy array and Tensor inputs. - - It crops lists of lq and gt images with corresponding locations. - - Args: - img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images - should have the same shape. If the input is an ndarray, it will - be transformed to a list containing itself. - img_lqs (list[ndarray] | ndarray): LQ images. Note that all images - should have the same shape. If the input is an ndarray, it will - be transformed to a list containing itself. - gt_patch_size (int): GT patch size. - scale (int): Scale factor. - gt_path (str): Path to ground-truth. Default: None. - - Returns: - list[ndarray] | ndarray: GT images and LQ images. If returned results - only have one element, just return ndarray. 
- """ - - if not isinstance(img_gts, list): - img_gts = [img_gts] - if not isinstance(img_lqs, list): - img_lqs = [img_lqs] - - # determine input type: Numpy array or Tensor - input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy' - - if input_type == 'Tensor': - h_lq, w_lq = img_lqs[0].size()[-2:] - h_gt, w_gt = img_gts[0].size()[-2:] - else: - h_lq, w_lq = img_lqs[0].shape[0:2] - h_gt, w_gt = img_gts[0].shape[0:2] - lq_patch_size = gt_patch_size // scale - - if h_gt != h_lq * scale or w_gt != w_lq * scale: - raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ', - f'multiplication of LQ ({h_lq}, {w_lq}).') - if h_lq < lq_patch_size or w_lq < lq_patch_size: - raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size ' - f'({lq_patch_size}, {lq_patch_size}). ' - f'Please remove {gt_path}.') - - # randomly choose top and left coordinates for lq patch - top = random.randint(0, h_lq - lq_patch_size) - left = random.randint(0, w_lq - lq_patch_size) - - # crop lq patch - if input_type == 'Tensor': - img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs] - else: - img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs] - - # crop corresponding gt patch - top_gt, left_gt = int(top * scale), int(left * scale) - if input_type == 'Tensor': - img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts] - else: - img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts] - if len(img_gts) == 1: - img_gts = img_gts[0] - if len(img_lqs) == 1: - img_lqs = img_lqs[0] - return img_gts, img_lqs - - -def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False): - """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees). - - We use vertical flip and transpose for rotation implementation. - All the images in the list use the same augmentation. - - Args: - imgs (list[ndarray] | ndarray): Images to be augmented. If the input - is an ndarray, it will be transformed to a list. - hflip (bool): Horizontal flip. Default: True. - rotation (bool): Ratotation. Default: True. - flows (list[ndarray]: Flows to be augmented. If the input is an - ndarray, it will be transformed to a list. - Dimension is (h, w, 2). Default: None. - return_status (bool): Return the status of flip and rotation. - Default: False. - - Returns: - list[ndarray] | ndarray: Augmented images and flows. If returned - results only have one element, just return ndarray. 
- - """ - hflip = hflip and random.random() < 0.5 - vflip = rotation and random.random() < 0.5 - rot90 = rotation and random.random() < 0.5 - - def _augment(img): - if hflip: # horizontal - cv2.flip(img, 1, img) - if vflip: # vertical - cv2.flip(img, 0, img) - if rot90: - img = img.transpose(1, 0, 2) - return img - - def _augment_flow(flow): - if hflip: # horizontal - cv2.flip(flow, 1, flow) - flow[:, :, 0] *= -1 - if vflip: # vertical - cv2.flip(flow, 0, flow) - flow[:, :, 1] *= -1 - if rot90: - flow = flow.transpose(1, 0, 2) - flow = flow[:, :, [1, 0]] - return flow - - if not isinstance(imgs, list): - imgs = [imgs] - imgs = [_augment(img) for img in imgs] - if len(imgs) == 1: - imgs = imgs[0] - - if flows is not None: - if not isinstance(flows, list): - flows = [flows] - flows = [_augment_flow(flow) for flow in flows] - if len(flows) == 1: - flows = flows[0] - return imgs, flows - else: - if return_status: - return imgs, (hflip, vflip, rot90) - else: - return imgs - - -def img_rotate(img, angle, center=None, scale=1.0): - """Rotate image. - - Args: - img (ndarray): Image to be rotated. - angle (float): Rotation angle in degrees. Positive values mean - counter-clockwise rotation. - center (tuple[int]): Rotation center. If the center is None, - initialize it as the center of the image. Default: None. - scale (float): Isotropic scale factor. Default: 1.0. - """ - (h, w) = img.shape[:2] - - if center is None: - center = (w // 2, h // 2) - - matrix = cv2.getRotationMatrix2D(center, angle, scale) - rotated_img = cv2.warpAffine(img, matrix, (w, h)) - return rotated_img diff --git a/spaces/manhkhanhUIT/BOPBTL/Global/options/__init__.py b/spaces/manhkhanhUIT/BOPBTL/Global/options/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/manhkhanhUIT/BOPBTL/Global/util/util.py b/spaces/manhkhanhUIT/BOPBTL/Global/util/util.py deleted file mode 100644 index b1369c3b568548a1c21d3412aef5fd35c9b0c5be..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/BOPBTL/Global/util/util.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from __future__ import print_function -import torch -import numpy as np -from PIL import Image -import numpy as np -import os -import torch.nn as nn - -# Converts a Tensor into a Numpy array -# |imtype|: the desired type of the converted numpy array -def tensor2im(image_tensor, imtype=np.uint8, normalize=True): - if isinstance(image_tensor, list): - image_numpy = [] - for i in range(len(image_tensor)): - image_numpy.append(tensor2im(image_tensor[i], imtype, normalize)) - return image_numpy - image_numpy = image_tensor.cpu().float().numpy() - if normalize: - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 - else: - image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 - image_numpy = np.clip(image_numpy, 0, 255) - if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3: - image_numpy = image_numpy[:, :, 0] - return image_numpy.astype(imtype) - - -# Converts a one-hot tensor into a colorful label map -def tensor2label(label_tensor, n_label, imtype=np.uint8): - if n_label == 0: - return tensor2im(label_tensor, imtype) - label_tensor = label_tensor.cpu().float() - if label_tensor.size()[0] > 1: - label_tensor = label_tensor.max(0, keepdim=True)[1] - label_tensor = Colorize(n_label)(label_tensor) - label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0)) - return label_numpy.astype(imtype) - - -def save_image(image_numpy, image_path): - image_pil = Image.fromarray(image_numpy) - image_pil.save(image_path) - - -def mkdirs(paths): - if isinstance(paths, list) and not isinstance(paths, str): - for path in paths: - mkdir(path) - else: - mkdir(paths) - - -def mkdir(path): - if not os.path.exists(path): - os.makedirs(path) diff --git a/spaces/matthoffner/chatbot/components/Promptbar/index.ts b/spaces/matthoffner/chatbot/components/Promptbar/index.ts deleted file mode 100644 index e3f6b39ba93a88cd450cb5c67025a8f29ee5fcc4..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot/components/Promptbar/index.ts +++ /dev/null @@ -1 +0,0 @@ -export { default } from './Promptbar'; diff --git a/spaces/merle/PROTEIN_GENERATOR/examples/weighted_sequence.sh b/spaces/merle/PROTEIN_GENERATOR/examples/weighted_sequence.sh deleted file mode 100644 index 779abda1d21255ed8d68198f72ac55729839be26..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/examples/weighted_sequence.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -#SBATCH -J seq_diff -#SBATCH -p gpu -#SBATCH --mem=8g -#SBATCH --gres=gpu:a6000:1 -#SBATCH -o ./out/slurm/slurm_%j.out - -source activate /software/conda/envs/SE3nv - -srun python ../inference.py \ - --num_designs 10 \ - --out out/design \ - --contigs 100 \ - --T 25 --save_best_plddt \ - --aa_composition W0.2 --potential_scale 1.75 diff --git a/spaces/merve/fill-in-the-blank/public/third_party/params.js b/spaces/merve/fill-in-the-blank/public/third_party/params.js deleted file mode 100644 index 8b4b8b39bb932ef3d7784445c6e9e5fc04b12841..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/public/third_party/params.js +++ /dev/null @@ -1,22 +0,0 @@ -window.makeParams = function(){ - var url = new URL(window.location) - var searchParams = new URLSearchParams(url.search) - - var rv = {} - - rv.get = key => { - return searchParams.get(key) - } - - rv.set = (key, value) => { - searchParams.set(key, value) - - url.search = searchParams.toString() - history.replaceState(null, '', url) - } - - return rv -} - - -if (window.init) init() \ No newline at end of file diff --git 
a/spaces/merve/fill-in-the-blank/source/_posts/2022-01-28-dataset-worldviews.md b/spaces/merve/fill-in-the-blank/source/_posts/2022-01-28-dataset-worldviews.md deleted file mode 100644 index 67698648fc4d268a46f0b7f91c3c954b8508eb92..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/source/_posts/2022-01-28-dataset-worldviews.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -permalink: /dataset-worldviews/ -template: post.html - -title: Datasets Have Worldviews -summary: Every dataset communicates a different perspective. When you shift your perspective, your conclusions can shift, too. -summaryalt: Every dataset communicates a different perspective. When you shift your perspective, your conclusions can shift, too. -shareimg: https://pair.withgoogle.com/explorables/images/dataset-worldviews-shareimg.png -date: 2022-01-28 ---- - - -

        Suppose you have a dataset of shapes. They can either be shaded or unshaded. They look something like this:

        - -
        - -

        You’ve built a supervised machine learning classifier that will automatically classify each shape as shaded or unshaded. You call it the "Is-Shaded Classifier".

        - -

        Click "Run Classifier" to see how your model performs.

        -

        -
        -
        -
        - -

        It’s not perfect—some of the shapes are definitely misclassified. You want to improve your model!

        - -

        To do so, you want to know more about the kinds of mistakes your model is making.

        - -

        Thinking About Bias

        - -

        In training, you only gave your model the raw image of each shape and one ground truth label: shaded and unshaded. But maybe something about your model—the distribution of the training data you used, the architecture you chose, or how you set your hyperparameters—resulted in your model performing better on some shapes than others.

        - -

        In fact, you’ve seen a lot of papers and articles citing issues of biased model performance between circles, triangles, and rectangles in shape data. One paper finds that shape detection algorithms tend to do worse on triangles; another article says color accuracy is an issue with circles. So you wonder: are there biases in your model’s misclassifications?

        - -
        Three abstract drawings of papers or articles with headlines 'Shape detection: biased against triangles?', 'Geometry experts call for more accurate rectangle data, cite fairness concerns', and 'Increasing color accuracy in circles'
        - -

        You want to make sure that your model is performing equally well across circles, triangles, and rectangles, so you decide to do a fairness analysis.

        - -

        There’s just one issue: you don’t have labels for which of your shapes are circles, triangles, or rectangles.

        - -

        So, you decide to send your data to data labelers.

        - -
        Different shapes with an arrow pointing to a group of abstract people.
        - -

        You receive feedback from your data labeling team that they’re not sure what to do with the shapes that aren’t exactly circles, triangles, or rectangles.

        - -
        An image of a computer interface and the instructions 'Please select the name of the shape below'. There is a lumpy, blob-like shape with three checkboxes that say 'circle', 'triangle', and 'rectangle'. There is a text box with a question mark next to the interface.
        - -

        For the shapes that are unclear, you can have them use their best guess or simply label them as “other”. Then, you can finally do some fairness analysis!

        - -

        Below is the interface they see:

        - -
        - -

        These shapes should be labeled...

        -
        - -
        - -
        - -

        If you go back and change the labelers' instructions, which shapes do you perform worst on? Where do you find bias?
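        Under the hood, that per-category comparison is just accuracy computed separately for each group the labelers defined. The sketch below is a hypothetical illustration only—the records, field names, and values are made up and are not the explorable's actual data:

        ```python
        # Hypothetical sketch of the per-category fairness check described above.
        # Each record pairs the model's prediction with the ground truth and the
        # category the labelers assigned ("circle", "triangle", "rectangle", or "other").
        from collections import defaultdict

        records = [
            {"true": "shaded", "pred": "shaded", "category": "circle"},
            {"true": "shaded", "pred": "unshaded", "category": "triangle"},
            {"true": "unshaded", "pred": "unshaded", "category": "other"},
            # ... one record per labeled shape
        ]

        correct = defaultdict(int)
        total = defaultdict(int)
        for r in records:
            total[r["category"]] += 1
            correct[r["category"]] += int(r["pred"] == r["true"])

        for category, n in total.items():
            print(f"{category}: accuracy {correct[category] / n:.2f}")
        ```

        Whichever categories you pick, this is all the analysis can ever report: performance per group, for the groups you chose to define.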

        - -

        You notice that your results hinge on how you choose to classify the shapes in your data.

        - -

        Because ultimately, this isn’t a world of only circles, triangles, and rectangles!

        - -

        Thinking About Classification

        - -

        What could we find out about our classifier's performance if we used different categories altogether?

        - -

        All shapes are basically...

        -

        Everything else should be labeled...

        - -

        -

        -

        -

        - -

        With each of the different categories, which shapes do you perform worst on? Where do you find bias?

        - -

        Each way of categorizing your shapes takes a different stance about what’s important. Each one makes some features more important than others, makes some distinctions visible and other distinctions invisible, and makes some things easy to classify while others become outliers.

        - -

        And each one tells you something different about what kind of bias your classifier has!

        - -

        Grouping and Regrouping

        - -

        Here's another way to look at the same results. We can draw all the shapes that were correctly classified above the dashed line, and all the incorrectly classified shapes below it.

        - -
        - -

        We’re still looking at the same model making the same classifications on the same shapes, so the same shapes stay above and below the line. But each way of grouping the results distributes the errors differently—each way tells you something different.
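        Here is a hypothetical sketch of that regrouping: the classifier's results are frozen, and only the grouping key changes. The shapes and outcomes below are made up for illustration:

        ```python
        # Hypothetical sketch: one fixed set of classifier results, regrouped two ways.
        # Each entry pairs a shape's features with whether the classifier got it right.
        results = [
            ({"shape": "circle", "size": "small"}, True),
            ({"shape": "triangle", "size": "small"}, False),
            ({"shape": "triangle", "size": "large"}, True),
            ({"shape": "rectangle", "size": "large"}, False),
        ]

        def error_rate_by(key):
            groups = {}
            for features, is_correct in results:
                groups.setdefault(features[key], []).append(is_correct)
            return {group: 1 - sum(flags) / len(flags) for group, flags in groups.items()}

        print(error_rate_by("shape"))  # errors grouped by shape category
        print(error_rate_by("size"))   # the same errors, grouped by size instead
        ```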

        - -

        Labels Tell Stories

        - -

        The decisions you make about classification, however small…

        - -

        All shapes are basically...

        - -

        …begin to shape others’ decisions…

        - -
        - -

        …they shape the analysis you can do…

        - -
        - -

        …and they shape the kinds of conversations that happen.

        - -

        - -

        It’s natural to want to find a way out of this problem by gathering more features or collecting more data. If we just have enough detail on enough data, surely we can avoid making these kinds of decisions, right?

        - -

        Unfortunately, that isn’t the case. Describing the world around us in any way—whether we’re telling a friend a story or telling a computer about shapes—requires us to choose what information is important to convey and what tools we want to use to convey it.

        - -

        Whether we think about it or not, we’re always making choices about classification. -

        - -

        All people are basically... men or women

        -

        All food is basically... sweet or savory

        -

        All content is basically... kid-friendly or adult

        -

        All speech is basically... hate speech or acceptable speech

        - -

        All results are basically... significant or insignificant

        - -

        And as we saw with shapes, all of these choices make some features more important than others, make some distinctions visible and other distinctions invisible, and make some things easy to classify while others become outliers.

        - -

        In Practice

        - -

        Let’s take a closer look at how this plays out in real machine learning applications. One straightforward example is in supervised object detection tasks.

        - - -

        For example, let’s imagine we want to train an object detection model on a dataset including this image:

        - -

        Image of the Seattle skyline
        Source: Wikimedia Commons

        - -

        We could give it the following ground truth bounding boxes:

        - -

        Image of the Seattle skyline with boxes around several items in the picture with labels like 'building' and 'tree'.

        - -

        This looks objective, right? After all, a building is a building, a bush is a bush, and a mountain is a mountain!

        -

        But even labeling the same regions in the same image, you can communicate a very different perspective:

        - -

        Image of the Seattle skyline with boxes around several items in the picture, with labels like 'plant, non medicinal' and 'structure, nonreligious'.
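        Written out as data, the contrast is stark: the box geometry is identical in both annotation sets, and only the label vocabulary changes. A hypothetical sketch, with made-up coordinates:

        ```python
        # Hypothetical sketch: the same bounding boxes under two label vocabularies.
        boxes = [(120, 40, 380, 300), (500, 80, 650, 290)]  # made-up (x1, y1, x2, y2) pixels

        annotations_a = [
            {"bbox": boxes[0], "label": "building"},
            {"bbox": boxes[1], "label": "tree"},
        ]

        annotations_b = [
            {"bbox": boxes[0], "label": "structure, nonreligious"},
            {"bbox": boxes[1], "label": "plant, non medicinal"},
        ]
        ```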

        - -

        Or consider the image below, with several sets of “ground truth” labels. Looking at each of these labels, consider:

        - -

        What features matter? What gets labeled? Whose worldview comes through? What might you learn from this set of labels that you wouldn't learn from another?

        - -
        Source: Wikimedia Commons
        - -

        There is no “view from nowhere”, no universal way to organize every object, or word, or image. Datasets are always products of a particular time, place, and set of conditions; they are socially situated artifacts. They have histories; they have politics. And ignoring this fact has very real consequences.

        - -

        So what do we do with this information?

        - -

        A great place to start is to reflect on your own context and get curious about your data.

        - -

        If it’s hard to see a dataset’s values—if it feels “objective”, “universal”, or “neutral”—it may simply be reflecting a worldview you’re accustomed to. So, understanding the limitations of your own worldview can tell you about the limitations of “objective” data. What assumptions do you make about the world? What feels like common sense? What feels foreign?

        - -

        And do some sleuthing about your data! Who collected this data? Why was it collected? Who paid for it? Where did the “ground truth” come from?

        - -

        You might even find yourself questioning what kinds of assumptions underpin machine learning dataset development or even thinking more deeply about classification as a whole.

        - -

        If you find yourself with lots of questions, you're already off to a good start.

        - -

        -

        - -

        Credits

        - -

        Dylan Baker // January 2022

        -

        Thanks to Adam Pearce, Alex Hanna, Emily Denton, Fernanda Viégas, Kevin Robinson, Nithum Thain, Razvan Amironesei, and Vinodkumar Prabhakaran for their help with this piece.

        -

        - - - - - -

        More Explorables

        -

        -

        - - - - - - - - - - - - - diff --git a/spaces/merve/fill-in-the-blank/source/measuring-diversity/script.js b/spaces/merve/fill-in-the-blank/source/measuring-diversity/script.js deleted file mode 100644 index 002fb32c0d0ee11cf292109725ebda6a2a4b57a4..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/source/measuring-diversity/script.js +++ /dev/null @@ -1,360 +0,0 @@ -// Seeded random number generator -window.random = new Math.seedrandom('aaaa') -window.randomIndex = new Math.seedrandom('7b') - -window.numRows = 20 -window.shapes = window.shapes || d3.range(21).map(i => randomShape(i, random)) - -window.random2 = new Math.seedrandom('7') -// window.columnShapes = window.columnShapes || d3.range(window.numRows).map(i => d3.range(10).map(i =>randomShape(i, random2))) -window.columnShapes = d3.range(window.numRows).map(i => d3.range(10).map(i =>randomShape(i, random2, true))) - -console.log(window.random3) -function randomShape(i, random, colTargets){ - var color2fill = { - green: '#5A9F8A', - orange: '#DF831F', - blue: '#80BAD4', - } - - var randomItem = function(arr) { - const index = Math.abs(random.int32()) % arr.length - return arr[index] - } - - var color = randomItem(d3.keys(color2fill)) - var size = randomItem(['small', 'large']) - var shape = randomItem(['circle', 'square', 'triangle']) - - if (colTargets && (i == 4 || i == 5)){ - color = 'green' - } - if (colTargets && (i == 4 || i == 15)){ - size = 'small' - } - if (colTargets && (i == 3 || i == 5)){ - shape = 'triangle' - } - - var displayIndex = randomIndex() - - return { - i, - displayIndex, - color, - fill: color2fill[color], - dFill: d3.color(color2fill[color]).darker(1), - size, - sizeVal: size == 'large' ? 1 : .4, - shape, - } -} - -var metrics = [ - { - str: 'Greens', - key: 'green', - field: 'color', - target: .3 - }, - { - str: 'Dot', - key: 'triangle', - field: 'shape', - target: .35 - }, - { - str: 'Smalls', - key: 'small', - field: 'size', - target: .60 - }, -] -window.metrics1 = metrics.map(d => ({...d})) -metrics1[2].target = .5 -window.metrics2 = metrics1.map(d => ({...d})) -metrics2[0].target = 1 - -metrics.forEach(d => { - d.scoreScale = d3.scaleLinear().domain([0, d.target, 1]).range([0, 1, 0]) -}) - - -var pctFmt = d3.format('.0%') -function addMetrics(metrics, {active, topSel, isSmall}){ - var metricSel = topSel - .st({textAlign: 'center'}) - .appendMany('div', metrics) - .st({textAlign: 'center', width: 200, display: 'inline-block'}) - - var width = 120 - - var svg = metricSel.append('svg') - .at({width: 120, height: 100}) - .append('g') - .translate([.5, 40.5]) - - if (isSmall){ - svg.translate((d, i) => [i ? 
-20.5 : 20.5, 40.5]) - } - - - var xScale = d3.scaleLinear().rangeRound([0, width]) - - var topText = svg.append('text') - .at({y: -20, fontWeight: 500, textAnchor: 'middle', x: width/2}) - - svg.append('path') - .at({d: 'M 0 0 H ' + width, stroke: '#000'}) - - var topTick = svg.append('path') - .at({d: 'M 0 0 V -12.5', stroke: '#000', strokeWidth: 3}) - - - var actualSel = svg.append('g').st({fill: highlightColor}) - - actualSel.append('path') - .at({d: 'M 0 0 V 12.5', stroke: highlightColor, strokeWidth: 3}) - - var actualPct = actualSel.append('text') - .translate(30, 1).at({textAnchor: 'middle'}).st({fontWeight: 300}) - - var actualScore = actualSel.append('text') - .translate(50, 1).at({textAnchor: 'middle'}).st({fontWeight: 300}) - - return () => { - var pcts = metrics.map(d => active.percents[d.key] || 0) - - topText.text(d => (d.str + ' Target: ').replace('s ', ' ') + pctFmt(d.target)) - - topTick.translate(d => xScale(d.target), 0) - actualSel.translate((d, i) => xScale(pcts[i]), 0) - - actualPct.text((d, i) => 'Actual: ' + pctFmt(pcts[i])) - actualScore.text((d, i) => 'Difference: ' + pctFmt(Math.abs(d.target - pcts[i]))) - } -} - - -function scoreActive(active){ - var numActive = d3.sum(active) - return metrics.map(m => { - var v = d3.sum(active, (d, i) => active[i] && shapes[i][m.field] == m.key) - return Math.abs(m.target - v/numActive); - // return m.scoreScale(v/numActive || 0) - }) -} - -var measures = [ - { - str: 'Utilitarian', - display_text: 'Minimize Mean Difference', - ranking_display_text: 'Mean Difference', - fn: s => d3.mean(s)*100, - ppFn: s => d3.format('.2%')(d3.mean(s)), - format: s => 'mean(' + s.map(d => d + '%').join(', ') + ')' - }, - { - str: 'Egalitarian', - display_text: 'Minimize Max Difference', - ranking_display_text: 'Max Difference', - fn: s => { - var srt = _.sortBy(s).map(d => Math.round(d*100)).reverse() - - return srt[0]*100000000 + srt[1]*10000 + srt[2] - }, - ppFn: s => { - var srt = _.sortBy(s).map(d => Math.round(d*100)).reverse() - - return srt[0] + '%' - }, - format: s => 'max(' + s.map(d => d + '%').join(', ') + ')' - } -] -measures2 = measures.map(d => ({...d})) - - -var randomActive = d3.range(10000).map(d => { - var active = shapes.map(d => random() < .3) - - if (d == 0) active = '111111111111101011100'.split('').map(d => +d) - - active.score = scoreActive(active) - measures.forEach(d => { - active[d.str] = d.fn(active.score) - }) - - return active -}) - -function addMetricBestButton(metricIndex, {active, sel, render}){ - var measureSel = sel - .append('div').st({textAlign: 'center', marginTop: 20, marginBottom: -20}) - .append('div.measure').st({width: 200, lineHeight: '1.8em', display: 'inline-block'}) - .html('Show Best') - .on('click', d => { - - // console.log(active) - var pcts = metrics.map(d => active.percents[d.key] || 0) - if (pcts[metricIndex] == metrics[metricIndex].target) return - - var nextActive = _.minBy(randomActive, a => a.score[metricIndex]) - active.forEach((d, i) => active[i] = nextActive[i]) - - measureSel.classed('active', e => e == d) - render() - }) -} - -function addMeasures(measures, {active, sel, render}){ - var measureSel = sel.selectAll('div.measure-container') - - measureSel - .append('div.measure') - .st({width: 200, lineHeight: '1.8em', display: 'inline-block', textAlign: 'center', }) - .html((d, i) => i ? 'Show the set where the highest difference is the smallest' : 'Show the set with
        lowest mean difference') - .html('Show Best') - .on('click', d => { - - var nextActive = _.minBy(randomActive, a => a[d.str]) - active.forEach((d, i) => active[i] = nextActive[i]) - - measureSel.classed('active', e => e == d) - render() - }) - - -} - -function addTotalMetrics(metrics, measures, {active, sel, render}){ - var metricSel = sel.classed('bot', 1).st({textAlign: 'center'}) - .appendMany('div.measure-container', measures) - .append('div', measures) - .st({textAlign: 'center', display: 'inline-block'}) - - - var headlineSel = metricSel.append('div') - var calcSel = metricSel.append('div')//.st({color: highlightColor}) - - return () => { - - measures.forEach(d => { - d.scores = scoreActive(active) - - d.score = Math.round(d.fn(d.scores)*100)/100 - if (d.ppFn) d.score = d.ppFn(d.scores) - }) - - headlineSel.st({fontWeight: 600}) - .text(d => d.ranking_display_text + ': ' + d.score) - - calcSel.text(d => { - var roundedScores = d.scores.map(s => Math.round(s * 100)) - - return d.format(roundedScores) - }) - } -} - - -window.shapeRandom = new Math.seedrandom('aaf') -var defaultActive = shapes.map(d => shapeRandom() < .4) -drawShape('all-shapes') - -drawShape('pick-green', ({active, topSel, sel, render}) => { - active.forEach((d, i) => active[i] = defaultActive[i]) - addMetricBestButton(0, {active, sel, render}) - return addMetrics(metrics.filter(d => d.key == 'green'), {active, topSel}) -}) - -drawShape('pick-triangle', ({active, topSel, sel, render}) => { - active.forEach((d, i) => active[i] = defaultActive[i]) - addMetricBestButton(1, {active, sel, render}) - return addMetrics(metrics.filter(d => d.key == 'triangle'), {active, topSel}) -}) - -drawShape('pick-metric', grid => { - grid.active.forEach((d, i) => grid.active[i] = defaultActive[i]) - - var metricRender = addMetrics(metrics, grid) - var totalMetricRender = addTotalMetrics(metrics, measures, grid) - addMeasures(measures, grid) - - return () => { - metricRender() - totalMetricRender() - } -}) - - -function drawShape(id, initFn=d => e => e){ - var active = shapes.map(d => true) - - var sel = d3.select('#' + id).html('') - - var s = 110 - - var topSel = sel.append('div.top') - var shapeSel = sel.appendMany('div.shape', _.sortBy(shapes, d => d.displayIndex)) - .st({width: s, height: s}) - .on('click', d => { - active[d.i] = !active[d.i] - render() - }) - - shapeSel.append('svg') - .at({width: s, height: s}) - .append('g').translate([s/2, s/2]) - .each(function(d){ - if (d.shape == 'square' || true){ - var rs = Math.round(d.sizeVal*s/3.5) - var shapeSel = d3.select(this).append('rect') - .at({x: -rs, y: -rs, width: rs*2, height: rs*2}) - } else if (d.shape == 'circle'){ - var shapeSel = d3.select(this).append('circle') - .at({r: d.sizeVal*s/3}) - } else if (d.shape == 'triangle'){ - var rs = Math.round(d.sizeVal*s/2.9) - var shapeSel = d3.select(this).append('path') - .translate(rs*Math.pow(3,1/2)/10, 1) - .at({d: [ - 'M', 0, -rs, - 'L', -rs*Math.pow(3,1/2)/2, rs/2, - 'L', +rs*Math.pow(3,1/2)/2, rs/2, - 'Z' - ].join(' ')}) - } - - if (d.shape == 'triangle'){ - d3.select(this).append('circle') - .at({r: 4, fill: '#fff', stroke: '#000', strokeWidth: 1}) - } - - shapeSel.at({fill: d.fill, stroke: d.dFill, strokeWidth: 2}) - }) - - var customRender = initFn({active, topSel, sel, render}) - - shapes.render = render - function render(){ - shapeSel.classed('active', d => active[d.i]) - // console.log(active.map(d => +d).join('')) - - active.percents = {} - active.shapes = shapes.filter(d => active[d.i]) - - 
d3.nestBy(active.shapes, d => d.color).forEach(d => { - active.percents[d.key] = d.length/active.shapes.length - }) - d3.nestBy(active.shapes, d => d.size).forEach(d => { - active.percents[d.key] = d.length/active.shapes.length - }) - d3.nestBy(active.shapes, d => d.shape).forEach(d => { - active.percents[d.key] = d.length/active.shapes.length - }) - - - customRender() - } - render() -} \ No newline at end of file diff --git a/spaces/metricspace/OcTra/df_local/enhance.py b/spaces/metricspace/OcTra/df_local/enhance.py deleted file mode 100644 index eead3f3ef9855285d8e5f37793e3a8e1a2d08a14..0000000000000000000000000000000000000000 --- a/spaces/metricspace/OcTra/df_local/enhance.py +++ /dev/null @@ -1,333 +0,0 @@ -import argparse -import os -import time -import warnings -from typing import Optional, Tuple, Union - -import torch -import torchaudio as ta -from loguru import logger -from numpy import ndarray -from torch import Tensor, nn -from torch.nn import functional as F -from torchaudio.backend.common import AudioMetaData - -import df_local -from df_local import config -from df_local.checkpoint import load_model as load_model_cp -from df_local.logger import init_logger, warn_once -from df_local.model import ModelParams -from df_local.modules import get_device -from df_local.utils import as_complex, as_real, get_norm_alpha, resample -from libdf import DF, erb, erb_norm, unit_norm - - -def main(args): - model, df_state, suffix = init_df( - args.model_base_dir, - post_filter=args.pf, - log_level=args.log_level, - config_allow_defaults=True, - epoch=args.epoch, - ) - if args.output_dir is None: - args.output_dir = "." - elif not os.path.isdir(args.output_dir): - os.mkdir(args.output_dir) - df_sr = ModelParams().sr - n_samples = len(args.noisy_audio_files) - for i, file in enumerate(args.noisy_audio_files): - progress = (i + 1) / n_samples * 100 - audio, meta = load_audio(file, df_sr) - t0 = time.time() - audio = enhance( - model, df_state, audio, pad=args.compensate_delay, atten_lim_db=args.atten_lim - ) - t1 = time.time() - t_audio = audio.shape[-1] / df_sr - t = t1 - t0 - rtf = t / t_audio - fn = os.path.basename(file) - p_str = f"{progress:2.0f}% | " if n_samples > 1 else "" - logger.info(f"{p_str}Enhanced noisy audio file '{fn}' in {t:.1f}s (RT factor: {rtf:.3f})") - audio = resample(audio, df_sr, meta.sample_rate) - save_audio( - file, audio, sr=meta.sample_rate, output_dir=args.output_dir, suffix=suffix, log=False - ) - - -def init_df( - model_base_dir: Optional[str] = None, - post_filter: bool = False, - log_level: str = "INFO", - log_file: Optional[str] = "enhance.log", - config_allow_defaults: bool = False, - epoch: Union[str, int, None] = "best", - default_model: str = "DeepFilterNet2", -) -> Tuple[nn.Module, DF, str]: - """Initializes and loads config, model and deep filtering state. - - Args: - model_base_dir (str): Path to the model directory containing checkpoint and config. If None, - load the pretrained DeepFilterNet2 model. - post_filter (bool): Enable post filter for some minor, extra noise reduction. - log_level (str): Control amount of logging. Defaults to `INFO`. - log_file (str): Optional log file name. None disables it. Defaults to `enhance.log`. - config_allow_defaults (bool): Whether to allow initializing new config values with defaults. - epoch (str): Checkpoint epoch to load. Options are `best`, `latest`, ``, and `none`. - `none` disables checkpoint loading. Defaults to `best`. - - Returns: - model (nn.Modules): Intialized model, moved to GPU if available. 
- df_state (DF): Deep filtering state for stft/istft/erb - suffix (str): Suffix based on the model name. This can be used for saving the enhanced - audio. - """ - try: - from icecream import ic, install - - ic.configureOutput(includeContext=True) - install() - except ImportError: - pass - use_default_model = False - if model_base_dir == "DeepFilterNet": - default_model = "DeepFilterNet" - use_default_model = True - elif model_base_dir == "DeepFilterNet2": - use_default_model = True - if model_base_dir is None or use_default_model: - use_default_model = True - model_base_dir = os.path.relpath( - os.path.join( - os.path.dirname(df_local.__file__), os.pardir, "pretrained_models", default_model - ) - ) - if not os.path.isdir(model_base_dir): - raise NotADirectoryError("Base directory not found at {}".format(model_base_dir)) - log_file = os.path.join(model_base_dir, log_file) if log_file is not None else None - init_logger(file=log_file, level=log_level, model=model_base_dir) - if use_default_model: - logger.info(f"Using {default_model} model at {model_base_dir}") - config.load( - os.path.join(model_base_dir, "config.ini"), - config_must_exist=True, - allow_defaults=config_allow_defaults, - allow_reload=True, - ) - if post_filter: - config.set("mask_pf", True, bool, ModelParams().section) - logger.info("Running with post-filter") - p = ModelParams() - df_state = DF( - sr=p.sr, - fft_size=p.fft_size, - hop_size=p.hop_size, - nb_bands=p.nb_erb, - min_nb_erb_freqs=p.min_nb_freqs, - ) - checkpoint_dir = os.path.join(model_base_dir, "checkpoints") - load_cp = epoch is not None and not (isinstance(epoch, str) and epoch.lower() == "none") - if not load_cp: - checkpoint_dir = None - try: - mask_only = config.get("mask_only", cast=bool, section="train") - except KeyError: - mask_only = False - model, epoch = load_model_cp(checkpoint_dir, df_state, epoch=epoch, mask_only=mask_only) - if (epoch is None or epoch == 0) and load_cp: - logger.error("Could not find a checkpoint") - exit(1) - logger.debug(f"Loaded checkpoint from epoch {epoch}") - model = model.to(get_device()) - # Set suffix to model name - suffix = os.path.basename(os.path.abspath(model_base_dir)) - if post_filter: - suffix += "_pf" - logger.info("Model loaded") - return model, df_state, suffix - - -def df_features(audio: Tensor, df: DF, nb_df: int, device=None) -> Tuple[Tensor, Tensor, Tensor]: - spec = df.analysis(audio.numpy()) # [C, Tf] -> [C, Tf, F] - a = get_norm_alpha(False) - erb_fb = df.erb_widths() - with warnings.catch_warnings(): - warnings.simplefilter("ignore", UserWarning) - erb_feat = torch.as_tensor(erb_norm(erb(spec, erb_fb), a)).unsqueeze(1) - spec_feat = as_real(torch.as_tensor(unit_norm(spec[..., :nb_df], a)).unsqueeze(1)) - spec = as_real(torch.as_tensor(spec).unsqueeze(1)) - if device is not None: - spec = spec.to(device) - erb_feat = erb_feat.to(device) - spec_feat = spec_feat.to(device) - return spec, erb_feat, spec_feat - - -def load_audio( - file: str, sr: Optional[int], verbose=True, **kwargs -) -> Tuple[Tensor, AudioMetaData]: - """Loads an audio file using torchaudio. - - Args: - file (str): Path to an audio file. - sr (int): Optionally resample audio to specified target sampling rate. - **kwargs: Passed to torchaudio.load(). Depends on the backend. The resample method - may be set via `method` which is passed to `resample()`. - - Returns: - audio (Tensor): Audio tensor of shape [C, T], if channels_first=True (default). - info (AudioMetaData): Meta data of the original audio file. Contains the original sr. 
- """ - ikwargs = {} - if "format" in kwargs: - ikwargs["format"] = kwargs["format"] - rkwargs = {} - if "method" in kwargs: - rkwargs["method"] = kwargs.pop("method") - info: AudioMetaData = ta.info(file, **ikwargs) - audio, orig_sr = ta.load(file, **kwargs) - if sr is not None and orig_sr != sr: - if verbose: - warn_once( - f"Audio sampling rate does not match model sampling rate ({orig_sr}, {sr}). " - "Resampling..." - ) - audio = resample(audio, orig_sr, sr, **rkwargs) - return audio, info - - -def save_audio( - file: str, - audio: Union[Tensor, ndarray], - sr: int, - output_dir: Optional[str] = None, - suffix: Optional[str] = None, - log: bool = False, - dtype=torch.int16, -): - outpath = file - if suffix is not None: - file, ext = os.path.splitext(file) - outpath = file + f"_{suffix}" + ext - if output_dir is not None: - outpath = os.path.join(output_dir, os.path.basename(outpath)) - if log: - logger.info(f"Saving audio file '{outpath}'") - audio = torch.as_tensor(audio) - if audio.ndim == 1: - audio.unsqueeze_(0) - if dtype == torch.int16 and audio.dtype != torch.int16: - audio = (audio * (1 << 15)).to(torch.int16) - if dtype == torch.float32 and audio.dtype != torch.float32: - audio = audio.to(torch.float32) / (1 << 15) - ta.save(outpath, audio, sr) - - -@torch.no_grad() -def enhance( - model: nn.Module, df_state: DF, audio: Tensor, pad=False, atten_lim_db: Optional[float] = None -): - model.eval() - bs = audio.shape[0] - if hasattr(model, "reset_h0"): - model.reset_h0(batch_size=bs, device=get_device()) - orig_len = audio.shape[-1] - n_fft, hop = 0, 0 - if pad: - n_fft, hop = df_state.fft_size(), df_state.hop_size() - # Pad audio to compensate for the delay due to the real-time STFT implementation - audio = F.pad(audio, (0, n_fft)) - nb_df = getattr(model, "nb_df", getattr(model, "df_bins", ModelParams().nb_df)) - spec, erb_feat, spec_feat = df_features(audio, df_state, nb_df, device=get_device()) - enhanced = model(spec, erb_feat, spec_feat)[0].cpu() - enhanced = as_complex(enhanced.squeeze(1)) - if atten_lim_db is not None and abs(atten_lim_db) > 0: - lim = 10 ** (-abs(atten_lim_db) / 20) - enhanced = as_complex(spec.squeeze(1)) * lim + enhanced * (1 - lim) - audio = torch.as_tensor(df_state.synthesis(enhanced.numpy())) - if pad: - # The frame size is equal to p.hop_size. Given a new frame, the STFT loop requires e.g. - # ceil((n_fft-hop)/hop). I.e. for 50% overlap, then hop=n_fft//2 - # requires 1 additional frame lookahead; 75% requires 3 additional frames lookahead. - # Thus, the STFT/ISTFT loop introduces an algorithmic delay of n_fft - hop. - assert n_fft % hop == 0 # This is only tested for 50% and 75% overlap - d = n_fft - hop - audio = audio[:, d : orig_len + d] - return audio - - -def parse_epoch_type(value: str) -> Union[int, str]: - try: - return int(value) - except ValueError: - assert value in ("best", "latest") - return value - - -def setup_df_argument_parser(default_log_level: str = "INFO") -> argparse.ArgumentParser: - parser = argparse.ArgumentParser() - parser.add_argument( - "--model-base-dir", - "-m", - type=str, - default=None, - help="Model directory containing checkpoints and config. " - "To load a pretrained model, you may just provide the model name, e.g. `DeepFilterNet`. 
" - "By default, the pretrained DeepFilterNet2 model is loaded.", - ) - parser.add_argument( - "--pf", - help="Post-filter that slightly over-attenuates very noisy sections.", - action="store_true", - ) - parser.add_argument( - "--output-dir", - "-o", - type=str, - default=None, - help="Directory in which the enhanced audio files will be stored.", - ) - parser.add_argument( - "--log-level", - type=str, - default=default_log_level, - help="Logger verbosity. Can be one of (debug, info, error, none)", - ) - parser.add_argument("--debug", "-d", action="store_const", const="DEBUG", dest="log_level") - parser.add_argument( - "--epoch", - "-e", - default="best", - type=parse_epoch_type, - help="Epoch for checkpoint loading. Can be one of ['best', 'latest', ].", - ) - return parser - - -def run(): - parser = setup_df_argument_parser() - parser.add_argument( - "--compensate-delay", - "-D", - action="store_true", - help="Add some paddig to compensate the delay introduced by the real-time STFT/ISTFT implementation.", - ) - parser.add_argument( - "--atten-lim", - "-a", - type=int, - default=None, - help="Attenuation limit in dB by mixing the enhanced signal with the noisy signal.", - ) - parser.add_argument( - "noisy_audio_files", - type=str, - nargs="+", - help="List of noise files to mix with the clean speech file.", - ) - main(parser.parse_args()) - - -if __name__ == "__main__": - run() diff --git a/spaces/mikeee/radiobee-aligner/tests/test_main.py b/spaces/mikeee/radiobee-aligner/tests/test_main.py deleted file mode 100644 index 7dbddfcbc75dccefe9e4f49e8609aa5bf71e675f..0000000000000000000000000000000000000000 --- a/spaces/mikeee/radiobee-aligner/tests/test_main.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Test __main__.py.""" -# pylint: disable=invalid-name -import tempfile -from fastlid import fastlid - -from logzero import logger - -# globals()["file2text"] = getattr(importlib.import_module(f"{radiobee.__name__}.file2text"), "file2text") -# from radiobee.process_upload import process_upload # same as file2text -from radiobee.files2df import files2df -from radiobee.file2text import file2text -from radiobee.lists2cmat import lists2cmat -from radiobee.cmat2tset import cmat2tset - -file1loc = "data/test_zh.txt" -file2loc = "data/test_en.txt" - -file1 = tempfile._TemporaryFileWrapper(open(file1loc, "rb"), file1loc) -file2 = tempfile._TemporaryFileWrapper(open(file2loc, "rb"), file2loc) - - -def test_file2file1(): - """Test cmat file2 file1.""" - # logger.info("file1: *%s*, file2: *%s*", file1, file2) - logger.info("file1.name: *%s*, file2.name: *%s*", file1.name, file2.name) - - text1 = file2text(file1) - text2 = file2text(file2) - - lang1, _ = fastlid(text1) - lang2, _ = fastlid(text2) - - lst1 = [elm.strip() for elm in text1.splitlines() if elm.strip()] - lst2 = [elm.strip() for elm in text2.splitlines() if elm.strip()] - - del lst1, lst2 diff --git a/spaces/mixshare/hackathon_chatbot_openai_api/app.py b/spaces/mixshare/hackathon_chatbot_openai_api/app.py deleted file mode 100644 index 136933235b399a96480c2dc87cabb3e01ba89e3f..0000000000000000000000000000000000000000 --- a/spaces/mixshare/hackathon_chatbot_openai_api/app.py +++ /dev/null @@ -1,63 +0,0 @@ -import gradio as gr -import os -import openai - -# 请记得要把 api 的 key 放到 settings 下面的 Repository Secrets 里。 -openai.api_key = os.getenv("openai_key") - - -# 如果你只打算通过 prompt 来定制机器人的行为,只需要修改这段 prompt 就够了。 -prompt = 
'请你调用你能获取的所有的博物百科资源库,用小助手的口吻来回答问题,开头第一句是你好啊,好奇宝宝😊由于是面向青少年儿童,请务必做到内容有理有据语言风格有趣生动,如果答案涉及具体事物,请在相应的事物名字后面加上对应的表情emoji,比如大象🐘奶牛🐄风铃🎐花朵🏵️啤酒🍺等等。如果提问的问题不相关,你要友善提醒你回答范畴是博物百科知识。在回答完毕后,请务必提供相关的来源出处。结尾是固定格式询问有没有解答你的疑惑呢?欢迎继续提问😊在每次回答的最后,请用一条长的横线与前面的回答隔开,然后附上一句不超过20个字的科学名言警句,以激发青少年儿童的求知欲。' - - - -history = {} - -# 修改本函数,来实现你自己的 chatbot -# p: 对机器人说话的内容 -# qid: 当前消息的唯一标识。例如 `'bxqid-cManAtRMszw...'`。由平台生成并传递给机器人,以便机器人区分单个问题(写日志、追踪调试、异步回调等)。同步调用可忽略。 -# uid: 用户的唯一标识。例如`'bxuid-Aj8Spso8Xsp...'`。由平台生成并传递给机器人,以便机器人区分用户。可被用于实现多轮对话的功能。 -# 返回值:[type, content] -# 详见 https://huggingface.co/spaces/baixing/hackathon_test/blob/main/bot-api.md -def chat(p, qid, uid): - # 找出该 uid 对应的历史对话 - global history - if uid in history: - msgs = history[uid] - else: - msgs = [] - - response = callapi(p, msgs) - history[uid] = msgs + [[p, response]] - return ["text", response] - - -def callapi(p, msgs): - if (len(msgs) > 8): #简单 hard-code 8 回合对话。如果需要更精准的,应该计算 token 数 - msgs = msgs[-8:] - - data = [{"role":"system", "content":prompt}] - for m in msgs: - data = data + [ - {"role":"user", "content":m[0]}, - {"role":"assistant", "content":m[1]} - ] - data = data + [{"role":"user", "content":p}] - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages= data - ) - print(response) - response = response["choices"][0]["message"]["content"] - while response.startswith("\n"): - response = response[1:] - return response - -iface = gr.Interface(fn=chat, - inputs=["text", "text", "text"], - outputs=["text", "text"], - description="""这是一个面向青少年儿童的博物百科全书问答助手,希望能够让你天马行空的想象力和好奇心得到满足! - - - """) -iface.launch() \ No newline at end of file diff --git a/spaces/mkrzyzan/face-swap/README.md b/spaces/mkrzyzan/face-swap/README.md deleted file mode 100644 index eb2544088ba2fb9ce64f08ed21188b50f6f0b7fa..0000000000000000000000000000000000000000 --- a/spaces/mkrzyzan/face-swap/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Face Swap -emoji: 👩🏻‍🦰 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/ml6team/controlnet-interior-design/segmentation.py b/spaces/ml6team/controlnet-interior-design/segmentation.py deleted file mode 100644 index 8c30781ed4c4c4e2ff86e65a46b4d8184031ad7b..0000000000000000000000000000000000000000 --- a/spaces/ml6team/controlnet-interior-design/segmentation.py +++ /dev/null @@ -1,55 +0,0 @@ -import logging -from typing import List, Tuple, Dict - -import streamlit as st -import torch -import gc -import numpy as np -from PIL import Image - -from transformers import AutoImageProcessor, UperNetForSemanticSegmentation - -from palette import ade_palette - -LOGGING = logging.getLogger(__name__) - - -def flush(): - gc.collect() - torch.cuda.empty_cache() - -@st.cache_resource(max_entries=5) -def get_segmentation_pipeline() -> Tuple[AutoImageProcessor, UperNetForSemanticSegmentation]: - """Method to load the segmentation pipeline - Returns: - Tuple[AutoImageProcessor, UperNetForSemanticSegmentation]: segmentation pipeline - """ - image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small") - image_segmentor = UperNetForSemanticSegmentation.from_pretrained( - "openmmlab/upernet-convnext-small") - return image_processor, image_segmentor - - -@torch.inference_mode() -@torch.autocast('cuda') -def segment_image(image: Image) -> Image: - """Method to segment image - Args: - image (Image): input image - 
Returns: - Image: segmented image - """ - image_processor, image_segmentor = get_segmentation_pipeline() - pixel_values = image_processor(image, return_tensors="pt").pixel_values - with torch.no_grad(): - outputs = image_segmentor(pixel_values) - - seg = image_processor.post_process_semantic_segmentation( - outputs, target_sizes=[image.size[::-1]])[0] - color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) - palette = np.array(ade_palette()) - for label, color in enumerate(palette): - color_seg[seg == label, :] = color - color_seg = color_seg.astype(np.uint8) - seg_image = Image.fromarray(color_seg).convert('RGB') - return seg_image \ No newline at end of file diff --git a/spaces/mosaicml/mpt-30b-chat/README.md b/spaces/mosaicml/mpt-30b-chat/README.md deleted file mode 100644 index 827a68e1e2bcff65a815f13c99b40c70b714fa84..0000000000000000000000000000000000000000 --- a/spaces/mosaicml/mpt-30b-chat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MPT-30B-Chat -emoji: 😻 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/speech_to_text/s2t_transformer.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/speech_to_text/s2t_transformer.py deleted file mode 100644 index aff9d0ffc7b7e671c476ff28d1cd945e9ff41519..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/speech_to_text/s2t_transformer.py +++ /dev/null @@ -1,502 +0,0 @@ -#!/usr/bin/env python3 - -import logging -import math -from typing import Dict, List, Optional, Tuple -from pathlib import Path - -import torch -import torch.nn as nn -from fairseq import checkpoint_utils, utils -from fairseq.data.data_utils import lengths_to_padding_mask -from fairseq.models import ( - FairseqEncoder, - FairseqEncoderDecoderModel, - register_model, - register_model_architecture, -) -from fairseq.models.transformer import Embedding, TransformerDecoder -from fairseq.modules import ( - FairseqDropout, - LayerNorm, - PositionalEmbedding, - TransformerEncoderLayer, -) -from torch import Tensor - - -logger = logging.getLogger(__name__) - - -class Conv1dSubsampler(nn.Module): - """Convolutional subsampler: a stack of 1D convolution (along temporal - dimension) followed by non-linear activation via gated linear units - (https://arxiv.org/abs/1911.08460) - - Args: - in_channels (int): the number of input channels - mid_channels (int): the number of intermediate channels - out_channels (int): the number of output channels - kernel_sizes (List[int]): the kernel size for each convolutional layer - """ - - def __init__( - self, - in_channels: int, - mid_channels: int, - out_channels: int, - kernel_sizes: List[int] = (3, 3), - ): - super(Conv1dSubsampler, self).__init__() - self.n_layers = len(kernel_sizes) - self.conv_layers = nn.ModuleList( - nn.Conv1d( - in_channels if i == 0 else mid_channels // 2, - mid_channels if i < self.n_layers - 1 else out_channels * 2, - k, - stride=2, - padding=k // 2, - ) - for i, k in enumerate(kernel_sizes) - ) - - def get_out_seq_lens_tensor(self, in_seq_lens_tensor): - out = in_seq_lens_tensor.clone() - for _ in range(self.n_layers): - out = ((out.float() - 1) / 2 + 1).floor().long() - return out - - def forward(self, src_tokens, src_lengths): - bsz, in_seq_len, _ = src_tokens.size() # B x T x (C x D) - x = src_tokens.transpose(1, 2).contiguous() # -> B x (C x D) x T - for conv in self.conv_layers: - x = conv(x) - x = 
nn.functional.glu(x, dim=1) - _, _, out_seq_len = x.size() - x = x.transpose(1, 2).transpose(0, 1).contiguous() # -> T x B x (C x D) - return x, self.get_out_seq_lens_tensor(src_lengths) - - -@register_model("s2t_transformer") -class S2TTransformerModel(FairseqEncoderDecoderModel): - """Adapted Transformer model (https://arxiv.org/abs/1706.03762) for - speech-to-text tasks. The Transformer encoder/decoder remains the same. - A trainable input subsampler is prepended to the Transformer encoder to - project inputs into the encoder dimension as well as downsample input - sequence for computational efficiency.""" - - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - # input - parser.add_argument( - "--conv-kernel-sizes", - type=str, - metavar="N", - help="kernel sizes of Conv1d subsampling layers", - ) - parser.add_argument( - "--conv-channels", - type=int, - metavar="N", - help="# of channels in Conv1d subsampling layers", - ) - # Transformer - parser.add_argument( - "--activation-fn", - type=str, - default="relu", - choices=utils.get_available_activation_fns(), - help="activation function to use", - ) - parser.add_argument( - "--dropout", type=float, metavar="D", help="dropout probability" - ) - parser.add_argument( - "--attention-dropout", - type=float, - metavar="D", - help="dropout probability for attention weights", - ) - parser.add_argument( - "--activation-dropout", - "--relu-dropout", - type=float, - metavar="D", - help="dropout probability after activation in FFN.", - ) - parser.add_argument( - "--encoder-embed-dim", - type=int, - metavar="N", - help="encoder embedding dimension", - ) - parser.add_argument( - "--encoder-ffn-embed-dim", - type=int, - metavar="N", - help="encoder embedding dimension for FFN", - ) - parser.add_argument( - "--encoder-layers", type=int, metavar="N", help="num encoder layers" - ) - parser.add_argument( - "--encoder-attention-heads", - type=int, - metavar="N", - help="num encoder attention heads", - ) - parser.add_argument( - "--encoder-normalize-before", - action="store_true", - help="apply layernorm before each encoder block", - ) - parser.add_argument( - "--decoder-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension", - ) - parser.add_argument( - "--decoder-ffn-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension for FFN", - ) - parser.add_argument( - "--decoder-layers", type=int, metavar="N", help="num decoder layers" - ) - parser.add_argument( - "--decoder-attention-heads", - type=int, - metavar="N", - help="num decoder attention heads", - ) - parser.add_argument( - "--decoder-normalize-before", - action="store_true", - help="apply layernorm before each decoder block", - ) - parser.add_argument( - "--share-decoder-input-output-embed", - action="store_true", - help="share decoder input and output embeddings", - ) - parser.add_argument( - "--layernorm-embedding", - action="store_true", - help="add layernorm to embedding", - ) - parser.add_argument( - "--no-scale-embedding", - action="store_true", - help="if True, dont scale embeddings", - ) - parser.add_argument( - "--load-pretrained-encoder-from", - type=str, - metavar="STR", - help="model to take encoder weights from (for initialization)", - ) - parser.add_argument( - '--encoder-freezing-updates', - type=int, - metavar='N', - help='freeze encoder for first N updates' - ) - - @classmethod - def build_encoder(cls, args): - encoder = 
S2TTransformerEncoder(args) - pretraining_path = getattr(args, "load_pretrained_encoder_from", None) - if pretraining_path is not None: - if not Path(pretraining_path).exists(): - logger.warning( - f"skipped pretraining because {pretraining_path} does not exist" - ) - else: - encoder = checkpoint_utils.load_pretrained_component_from_model( - component=encoder, checkpoint=pretraining_path - ) - logger.info(f"loaded pretrained encoder from: {pretraining_path}") - return encoder - - @classmethod - def build_decoder(cls, args, task, embed_tokens): - return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - - # make sure all arguments are present in older models - base_architecture(args) - - def build_embedding(dictionary, embed_dim): - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - return Embedding(num_embeddings, embed_dim, padding_idx) - - decoder_embed_tokens = build_embedding( - task.target_dictionary, args.decoder_embed_dim - ) - encoder = cls.build_encoder(args) - decoder = cls.build_decoder(args, task, decoder_embed_tokens) - return cls(encoder, decoder) - - def get_normalized_probs( - self, - net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], - log_probs: bool, - sample: Optional[Dict[str, Tensor]] = None, - ): - # net_output['encoder_out'] is a (B, T, D) tensor - lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) - lprobs.batch_first = True - return lprobs - - def forward(self, src_tokens, src_lengths, prev_output_tokens): - """ - The forward method inherited from the base class has a **kwargs - argument in its input, which is not supported in torchscript. This - method overwrites the forward method definition without **kwargs. 
- """ - encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths) - decoder_out = self.decoder( - prev_output_tokens=prev_output_tokens, encoder_out=encoder_out - ) - return decoder_out - - -class S2TTransformerEncoder(FairseqEncoder): - """Speech-to-text Transformer encoder that consists of input subsampler and - Transformer encoder.""" - - def __init__(self, args): - super().__init__(None) - - self.encoder_freezing_updates = args.encoder_freezing_updates - self.num_updates = 0 - - self.dropout_module = FairseqDropout( - p=args.dropout, module_name=self.__class__.__name__ - ) - self.embed_scale = math.sqrt(args.encoder_embed_dim) - if args.no_scale_embedding: - self.embed_scale = 1.0 - self.padding_idx = 1 - - self.subsample = Conv1dSubsampler( - args.input_feat_per_channel * args.input_channels, - args.conv_channels, - args.encoder_embed_dim, - [int(k) for k in args.conv_kernel_sizes.split(",")], - ) - - self.embed_positions = PositionalEmbedding( - args.max_source_positions, args.encoder_embed_dim, self.padding_idx - ) - - self.transformer_layers = nn.ModuleList( - [TransformerEncoderLayer(args) for _ in range(args.encoder_layers)] - ) - if args.encoder_normalize_before: - self.layer_norm = LayerNorm(args.encoder_embed_dim) - else: - self.layer_norm = None - - def _forward(self, src_tokens, src_lengths, return_all_hiddens=False): - x, input_lengths = self.subsample(src_tokens, src_lengths) - x = self.embed_scale * x - - encoder_padding_mask = lengths_to_padding_mask(input_lengths) - positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) - x += positions - x = self.dropout_module(x) - - encoder_states = [] - - for layer in self.transformer_layers: - x = layer(x, encoder_padding_mask) - if return_all_hiddens: - encoder_states.append(x) - - if self.layer_norm is not None: - x = self.layer_norm(x) - - return { - "encoder_out": [x], # T x B x C - "encoder_padding_mask": [encoder_padding_mask] if encoder_padding_mask.any() else [], # B x T - "encoder_embedding": [], # B x T x C - "encoder_states": encoder_states, # List[T x B x C] - "src_tokens": [], - "src_lengths": [], - } - - def forward(self, src_tokens, src_lengths, return_all_hiddens=False): - if self.num_updates < self.encoder_freezing_updates: - with torch.no_grad(): - x = self._forward(src_tokens, src_lengths, - return_all_hiddens=return_all_hiddens) - else: - x = self._forward(src_tokens, src_lengths, - return_all_hiddens=return_all_hiddens) - return x - - def reorder_encoder_out(self, encoder_out, new_order): - new_encoder_out = ( - [] if len(encoder_out["encoder_out"]) == 0 - else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]] - ) - - new_encoder_padding_mask = ( - [] if len(encoder_out["encoder_padding_mask"]) == 0 - else [x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"]] - ) - - new_encoder_embedding = ( - [] if len(encoder_out["encoder_embedding"]) == 0 - else [x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]] - ) - - encoder_states = encoder_out["encoder_states"] - if len(encoder_states) > 0: - for idx, state in enumerate(encoder_states): - encoder_states[idx] = state.index_select(1, new_order) - - return { - "encoder_out": new_encoder_out, # T x B x C - "encoder_padding_mask": new_encoder_padding_mask, # B x T - "encoder_embedding": new_encoder_embedding, # B x T x C - "encoder_states": encoder_states, # List[T x B x C] - "src_tokens": [], # B x T - "src_lengths": [], # B x 1 - } - - def set_num_updates(self, num_updates): - 
super().set_num_updates(num_updates) - self.num_updates = num_updates - - -class TransformerDecoderScriptable(TransformerDecoder): - def extract_features( - self, - prev_output_tokens, - encoder_out: Optional[Dict[str, List[Tensor]]] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - full_context_alignment: bool = False, - alignment_layer: Optional[int] = None, - alignment_heads: Optional[int] = None, - ): - # call scriptable method from parent class - x, _ = self.extract_features_scriptable( - prev_output_tokens, - encoder_out, - incremental_state, - full_context_alignment, - alignment_layer, - alignment_heads, - ) - return x, None - - -@register_model_architecture(model_name="s2t_transformer", arch_name="s2t_transformer") -def base_architecture(args): - args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0) - # Convolutional subsampler - args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5") - args.conv_channels = getattr(args, "conv_channels", 1024) - # Transformer - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 12) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.dropout = getattr(args, "dropout", 0.1) - args.attention_dropout = getattr(args, "attention_dropout", args.dropout) - args.activation_dropout = getattr(args, "activation_dropout", args.dropout) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - args.no_scale_embedding = getattr(args, "no_scale_embedding", False) - args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_s") -def s2t_transformer_s(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) - args.dropout = getattr(args, "dropout", 0.1) - base_architecture(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_xs") -def s2t_transformer_xs(args): - 
args.encoder_layers = getattr(args, "encoder_layers", 6) - args.decoder_layers = getattr(args, "decoder_layers", 3) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4) - args.dropout = getattr(args, "dropout", 0.3) - s2t_transformer_s(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_sp") -def s2t_transformer_sp(args): - args.encoder_layers = getattr(args, "encoder_layers", 16) - s2t_transformer_s(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_m") -def s2t_transformer_m(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.dropout = getattr(args, "dropout", 0.15) - base_architecture(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_mp") -def s2t_transformer_mp(args): - args.encoder_layers = getattr(args, "encoder_layers", 16) - s2t_transformer_m(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_l") -def s2t_transformer_l(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) - args.dropout = getattr(args, "dropout", 0.2) - base_architecture(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_lp") -def s2t_transformer_lp(args): - args.encoder_layers = getattr(args, "encoder_layers", 16) - s2t_transformer_l(args) diff --git a/spaces/mueller-franzes/medfusion-app/streamlit/pages/chest.py b/spaces/mueller-franzes/medfusion-app/streamlit/pages/chest.py deleted file mode 100644 index 71754bc96efe256e64ec43c0da0c529c79e82cdf..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/streamlit/pages/chest.py +++ /dev/null @@ -1,41 +0,0 @@ -import streamlit as st -import torch -import numpy as np - -from medical_diffusion.models.pipelines import DiffusionPipeline - -st.title("Chest X-ray images", anchor=None) -st.sidebar.markdown("Medfusion for chest X-ray image generation") -st.header('Information') -st.markdown('Medfusion was trained on the [CheXpert](https://stanfordmlgroup.github.io/competitions/chexpert/) dataset') - -st.header('Settings') -n_samples = st.number_input("Samples", min_value=1, max_value=25, value=4) -steps = st.number_input("Sampling steps", min_value=1, max_value=999, value=50) -guidance_scale = st.number_input("Guidance scale", min_value=1, max_value=10, value=1) -seed = st.number_input("Seed", min_value=0, max_value=None, value=1) -cond_str = st.radio("Cardiomegaly", ('Yes', 'No'), index=1, help="Conditioned on 'cardiomegaly' or 'no cardiomegaly'", horizontal=True) -torch.manual_seed(seed) -device_str = 'cuda' if torch.cuda.is_available() else 'cpu' -device = torch.device(device_str) - -@st.cache(allow_output_mutation = True) -def init_pipeline(): - pipeline = DiffusionPipeline.load_from_checkpoint('runs/chest_diffusion/last.ckpt') - return pipeline - -if st.button(f'Sample (using {device_str})'): - cond = {'Yes':1, 'No':0}[cond_str] - condition = torch.tensor([cond]*n_samples, device=device) - un_cond = torch.tensor([1-cond]*n_samples, device=device) - - pipeline = init_pipeline() - 
pipeline.to(device) - images = pipeline.sample(n_samples, (8, 32, 32), guidance_scale=guidance_scale, condition=condition, un_cond=un_cond, steps=steps, use_ddim=True ) - - images = images.clamp(-1, 1) - images = images.cpu().numpy() # [B, C, H, W] - images = (images+1)/2 # Transform from [-1, 1] to [0, 1] - - images = [np.moveaxis(img, 0, -1) for img in images] - st.image(images, channels="RGB", output_format='png') # expects (w,h,3) \ No newline at end of file diff --git a/spaces/nagolinc/npcGenerator/words.py b/spaces/nagolinc/npcGenerator/words.py deleted file mode 100644 index db3dace05b2a76a3a4ea6472af64d588a331e578..0000000000000000000000000000000000000000 --- a/spaces/nagolinc/npcGenerator/words.py +++ /dev/null @@ -1,93 +0,0 @@ - -vowels = list("aeiou") -constants = list("bcdfghjklmnpqrstvwxyz") -seperators=list("-'") - -races="""Dwarf -Elf -Halfling -Human -Dragonborn -Gnome -Half-elf -Half-orc -Tiefling -Aarakocra -Genasi -Goliath""".split("\n") - -races=[x.strip() for x in races] - - - - -classes="""Barbarian -Bard -Cleric -Druid -Fighter -Monk -Paladin -Ranger -Rogue -Sorcerer -Warlock -Wizard""".split("\n") -classes=[x.strip() for x in classes] - -colors="""red -blue -green -yellow -orange -purple -pink -brown -black -white""".split("\n") -colors=[x.strip() for x in colors] - -outfits="""shirt -pair of pants -pair of shoes -hat -pair of glasses -backpack -belt -tie -cloak -robe -chain mail vest -suit of plate armor -suit of leather armor -suit of studded leather armor -suit of scale armor -suit of chain mail armor -suit of ring mail armor -""".split("\n") -outfits=[x.strip() for x in outfits] - -weapons="""sword -dagger -mace -axe -polearm -bow -crossbow -sling -club -flail -warhammer -morningstar -halberd -war pick -war sickle -war hammer""".split("\n") -weapons=[x.strip() for x in weapons] - -objects="""shield -lantern -sack -severed head -crystal""".split("\n") -objects=[x.strip() for x in objects] \ No newline at end of file diff --git a/spaces/nateraw/deepafx-st/deepafx_st/processors/proxy/tcn.py b/spaces/nateraw/deepafx-st/deepafx_st/processors/proxy/tcn.py deleted file mode 100644 index a7e00047b978c2a9ea8efc377ebb274fceabe4c8..0000000000000000000000000000000000000000 --- a/spaces/nateraw/deepafx-st/deepafx_st/processors/proxy/tcn.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright 2022 Christian J. Steinmetz - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# TCN implementation adapted from: -# https://github.com/csteinmetz1/micro-tcn/blob/main/microtcn/tcn.py - -import torch -from argparse import ArgumentParser - -from deepafx_st.utils import center_crop, causal_crop - - -class FiLM(torch.nn.Module): - def __init__(self, num_features, cond_dim): - super().__init__() - self.num_features = num_features - self.bn = torch.nn.BatchNorm1d(num_features, affine=False) - self.adaptor = torch.nn.Linear(cond_dim, num_features * 2) - - def forward(self, x, cond): - - # project conditioning to 2 x num. 
conv channels - cond = self.adaptor(cond) - - # split the projection into gain and bias - g, b = torch.chunk(cond, 2, dim=-1) - - # add virtual channel dim if needed - if g.ndim == 2: - g = g.unsqueeze(1) - b = b.unsqueeze(1) - - # reshape for application - g = g.permute(0, 2, 1) - b = b.permute(0, 2, 1) - - x = self.bn(x) # apply BatchNorm without affine - x = (x * g) + b # then apply conditional affine - - return x - - -class ConditionalTCNBlock(torch.nn.Module): - def __init__( - self, in_ch, out_ch, cond_dim, kernel_size=3, dilation=1, causal=False, **kwargs - ): - super().__init__() - - self.in_ch = in_ch - self.out_ch = out_ch - self.kernel_size = kernel_size - self.dilation = dilation - self.causal = causal - - self.conv1 = torch.nn.Conv1d( - in_ch, - out_ch, - kernel_size=kernel_size, - padding=0, - dilation=dilation, - bias=True, - ) - self.film = FiLM(out_ch, cond_dim) - self.relu = torch.nn.PReLU(out_ch) - self.res = torch.nn.Conv1d( - in_ch, out_ch, kernel_size=1, groups=in_ch, bias=False - ) - - def forward(self, x, p): - x_in = x - - x = self.conv1(x) - x = self.film(x, p) # apply FiLM conditioning - x = self.relu(x) - x_res = self.res(x_in) - - if self.causal: - x = x + causal_crop(x_res, x.shape[-1]) - else: - x = x + center_crop(x_res, x.shape[-1]) - - return x - - -class ConditionalTCN(torch.nn.Module): - """Temporal convolutional network with conditioning module. - Args: - sample_rate (float): Audio sample rate. - num_control_params (int, optional): Dimensionality of the conditioning signal. Default: 24 - ninputs (int, optional): Number of input channels (mono = 1, stereo 2). Default: 1 - noutputs (int, optional): Number of output channels (mono = 1, stereo 2). Default: 1 - nblocks (int, optional): Number of total TCN blocks. Default: 10 - kernel_size (int, optional: Width of the convolutional kernels. Default: 3 - dialation_growth (int, optional): Compute the dilation factor at each block as dilation_growth ** (n % stack_size). Default: 1 - channel_growth (int, optional): Compute the output channels at each black as in_ch * channel_growth. Default: 2 - channel_width (int, optional): When channel_growth = 1 all blocks use convolutions with this many channels. Default: 64 - stack_size (int, optional): Number of blocks that constitute a single stack of blocks. Default: 10 - causal (bool, optional): Causal TCN configuration does not consider future input values. 
Default: False - """ - - def __init__( - self, - sample_rate, - num_control_params=24, - ninputs=1, - noutputs=1, - nblocks=10, - kernel_size=15, - dilation_growth=2, - channel_growth=1, - channel_width=64, - stack_size=10, - causal=False, - skip_connections=False, - **kwargs, - ): - super().__init__() - self.num_control_params = num_control_params - self.ninputs = ninputs - self.noutputs = noutputs - self.nblocks = nblocks - self.kernel_size = kernel_size - self.dilation_growth = dilation_growth - self.channel_growth = channel_growth - self.channel_width = channel_width - self.stack_size = stack_size - self.causal = causal - self.skip_connections = skip_connections - self.sample_rate = sample_rate - - self.blocks = torch.nn.ModuleList() - for n in range(nblocks): - in_ch = out_ch if n > 0 else ninputs - - if self.channel_growth > 1: - out_ch = in_ch * self.channel_growth - else: - out_ch = self.channel_width - - dilation = self.dilation_growth ** (n % self.stack_size) - - self.blocks.append( - ConditionalTCNBlock( - in_ch, - out_ch, - self.num_control_params, - kernel_size=self.kernel_size, - dilation=dilation, - padding="same" if self.causal else "valid", - causal=self.causal, - ) - ) - - self.output = torch.nn.Conv1d(out_ch, noutputs, kernel_size=1) - self.receptive_field = self.compute_receptive_field() - # print( - # f"TCN receptive field: {self.receptive_field} samples", - # f" or {(self.receptive_field/self.sample_rate)*1e3:0.3f} ms", - # ) - - def forward(self, x, p, **kwargs): - - # causally pad input signal - x = torch.nn.functional.pad(x, (self.receptive_field - 1, 0)) - - # iterate over blocks passing conditioning - for idx, block in enumerate(self.blocks): - x = block(x, p) - if self.skip_connections: - if idx == 0: - skips = x - else: - skips = center_crop(skips, x[-1]) + x - else: - skips = 0 - - # final 1x1 convolution to collapse channels - out = self.output(x + skips) - - return out - - def compute_receptive_field(self): - """Compute the receptive field in samples.""" - rf = self.kernel_size - for n in range(1, self.nblocks): - dilation = self.dilation_growth ** (n % self.stack_size) - rf = rf + ((self.kernel_size - 1) * dilation) - return rf diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Arcsoft Photostudio 6 Serial Number.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Arcsoft Photostudio 6 Serial Number.md deleted file mode 100644 index d129239016ac43b321e96e1b37c92bd24d78bdf8..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Arcsoft Photostudio 6 Serial Number.md +++ /dev/null @@ -1,40 +0,0 @@ - -Here is a possible title and article with html formatting for the keyword "arcsoft photostudio 6 serial number": - -

        How to Register ArcSoft PhotoStudio 6 with a Serial Number

        -

        ArcSoft PhotoStudio 6 is a powerful photo editing software that offers a range of advanced tools, filters and special effects. Whether you are a beginner or an advanced user, you can use PhotoStudio 6 to manage, enhance, print and get creative with your digital photos. To enjoy the full features of PhotoStudio 6, you need to register the software with a serial number.

        -

        A serial number is a unique code that identifies your copy of PhotoStudio 6. You can obtain a serial number by purchasing the software from ArcSoft's official website or from authorized resellers. If you have bought the software as a bundle with a hardware device, you can find the serial number on the CD sleeve or on the device box.

        -

        arcsoft photostudio 6 serial number


        DOWNLOADhttps://urlcod.com/2uIbxt



        -

        To register PhotoStudio 6 with a serial number, follow these steps:

        -
          -
1. Launch PhotoStudio 6 and click on the "Help" menu.
2. Select "Software Registration" from the drop-down list.
3. Fill out the form with your serial number, product title, place of purchase and email address. If your software came bundled with a hardware device, also enter the device brand and model number.
4. Click on "Submit" to complete the registration.
        -

        Once you have registered PhotoStudio 6, you will be able to access all the features and functions of the software. You will also receive valuable discounts and promotion information from ArcSoft. If you have any questions or problems with the registration process, you can contact ArcSoft's customer support via email or phone.

        -

PhotoStudio 6 is great software for photo editing and creativity. With a serial number, you can unlock its full potential and enjoy your digital photos in new ways.


        PhotoStudio 6 has a user-friendly interface that allows you to easily access and apply various tools, filters and effects to your photos. You can use the toolbar on the left to select different modes, such as crop, rotate, resize, clone, red-eye removal and more. You can also use the menu bar on the top to access more options, such as file, edit, view, layer, enhance and create.

        -

        One of the features of PhotoStudio 6 is the Face Beautify function, which lets you improve the appearance of faces in your photos. You can use this function to smooth skin, remove blemishes, whiten teeth, brighten eyes and apply makeup. To use this function, follow these steps:

        -
          -
        1. Click on the "Enhance" menu and select "Face Beautify".
        2. -
        3. Select a face in your photo or click on "Auto Detect" to let the software find the faces automatically.
        4. -
        5. Adjust the sliders to apply different effects to the face, such as skin smoothness, blemish removal, eye brightness and more.
        6. -
        7. Click on "OK" to apply the changes.
        8. -
        -

        Another feature of PhotoStudio 6 is the Magic-Cut function, which lets you cut out objects from your photos and paste them onto another background. You can use this function to create fun and creative compositions with your photos. To use this function, follow these steps:

        -
          -
        1. Click on the "Create" menu and select "Magic-Cut".
        2. -
        3. Select a photo that contains an object that you want to cut out.
        4. -
        5. Use the brush tool to mark the object with green and the background with red.
        6. -
        7. Click on "Preview" to see the result. You can refine the edges with the eraser tool if needed.
        8. -
        9. Click on "OK" to cut out the object.
        10. -
        11. Select another photo that you want to use as the new background.
        12. -
        13. Paste the cut-out object onto the new background and adjust its size and position.
        14. -
        15. Click on "Save" to save your new photo.
        16. -
        -

        PhotoStudio 6 has many more features and functions that you can explore and experiment with. You can also use PhotoStudio 6 to print your photos, share them online or create slideshows and collages. PhotoStudio 6 is a versatile and powerful photo editing software that can help you make the most of your digital photos.

        -

        7196e7f11a
        -
        -
        \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cabri Geometry II Plus V1 4 2 Crack ((TOP))ed NoPE.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cabri Geometry II Plus V1 4 2 Crack ((TOP))ed NoPE.md deleted file mode 100644 index 4d95d5fe85afab8ffe0cef5a366c9995d44d9d1e..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cabri Geometry II Plus V1 4 2 Crack ((TOP))ed NoPE.md +++ /dev/null @@ -1,128 +0,0 @@ - -

        Cabri Geometry II Plus v1 4 2 Cracked NoPE: A Review

        -

        If you are looking for a dynamic math and geometry software that can help you create and manipulate geometric figures, you might have heard of Cabri Geometry II Plus. This software is designed for students and teachers who want to explore math concepts in a simple and interactive way. But what if you don't want to pay for the official version of the software? You might be tempted to download a cracked version of the software, such as Cabri Geometry II Plus v1 4 2 Cracked NoPE. But is it worth it? In this article, we will review Cabri Geometry II Plus v1 4 2 Cracked NoPE, and compare it with the official version and other alternatives. We will also discuss the pros and cons of using cracked software, and answer some frequently asked questions.

        -

        What is Cabri Geometry II Plus?

        -

        Cabri Geometry II Plus is a software developed by Cabrilog, a French company that specializes in creating educational software for mathematics. Cabri Geometry II Plus is an interactive notebook that allows users to create geometric and numerical constructions, such as transformations, measurements, calculus, tables, graphs, expressions, and equations. Users can also explore and make conjectures, reason and prove, solve problems, and self-evaluate using the software. Cabri Geometry II Plus is suitable for all secondary school level mathematics, and can also be used for other disciplines such as physics, technology, and applied arts.

        -

        Cabri Geometry II Plus v1 4 2 Cracked NoPE


        Download ✸✸✸ https://urlcod.com/2uIb12



        -

        Features and benefits of Cabri Geometry II Plus

        -

        Some of the features and benefits of Cabri Geometry II Plus are:

        -
          -
• It has a user-friendly interface that lets users create figures in a few clicks.
• It offers a direct and clear visualization of all the movements or variations made on the figure.
• It supports multiple languages, such as English, French, German, Spanish, Italian, Japanese, Chinese, Arabic, etc.
• It includes a complete guide for new users, 60 concrete activities that can be used in one click, and over an hour of self-training videos.
• It allows users to export their figures in various formats, such as PNG, JPEG, SVG, PDF, etc.
• It can be integrated with other software or platforms, such as Microsoft Office, Moodle, GeoGebra, etc.
        -

        System requirements and compatibility of Cabri Geometry II Plus

        -

        To use Cabri Geometry II Plus, users must have a computer with the following minimum requirements:

        - - - -
PC: Cabri Geometry II Plus requires Windows XP/Vista/7 or higher. Recommended: 800 MHz or faster processor, 256 MB RAM, OpenGL-compatible graphics card with 64 MB RAM or more.
Mac: Cabri Geometry II Plus requires Mac OS X 10.4 or higher. Recommended: 800 MHz or faster processor, 256 MB RAM, OpenGL-compatible graphics card with 64 MB RAM or more.
        -

        Cabri Geometry II Plus is compatible with the following browsers:

        -
          -
• Internet Explorer 6 or higher
• Firefox 2 or higher
• Safari 3 or higher
• Chrome 4 or higher
• Opera 9 or higher
        -

What is cracked software?

        -

Cracked software is software that has been modified to bypass the security measures or license restrictions imposed by the original developer or publisher. It is usually distributed by hackers, crackers, or pirates who want to use the software for free or for malicious purposes, and it can be found on various websites, forums, torrents, or peer-to-peer networks.

        -

        Risks and disadvantages of using cracked software

        -

        Using cracked software may seem like a good way to save money and access premium features, but it comes with many risks and disadvantages, such as:

        -
          -
• It may contain viruses, malware, spyware, ransomware, or other harmful programs that can damage your computer, steal your personal information, or encrypt your files and demand a ransom.
• It may not work properly, crash frequently, corrupt your data, or cause compatibility issues with other software or hardware.
• It may not receive updates, patches, bug fixes, or technical support from the original developer or publisher.
• It may violate the intellectual property rights of the original developer or publisher, and expose you to legal actions, fines, or penalties.
• It may be unethical and unfair to the original developer or publisher who invested time, money, and effort to create the software.
        -

        Legal and ethical issues of using cracked software

        -

        Using cracked software is not only risky and disadvantageous, but also illegal and unethical. According to the U.S. Copyright Act of 1976, cracking a software is considered a form of infringement that can result in civil and criminal penalties. The penalties can range from $200 to $150,000 per work infringed for civil cases, and up to five years in prison and $250,000 in fines for criminal cases. Furthermore, using cracked software is also a violation of the End User License Agreement (EULA) that you agree to when you install the software. The EULA is a legal contract between you and the developer or publisher that specifies the terms and conditions of using the software. By using cracked software, you are breaking the contract and losing your rights as a legitimate user.

        -

        Besides the legal issues, using cracked software is also unethical and immoral. It is a form of stealing that deprives the original developer or publisher of their rightful income and recognition. It also discourages them from creating more quality software in the future. Moreover, using cracked software is disrespectful to the hard work and creativity of the people who made the software. It also shows a lack of integrity and honesty on your part as a user.

        -

        What is Cabri Geometry II Plus v1 4 2 Cracked NoPE?

        -

        Cabri Geometry II Plus v1 4 2 Cracked NoPE is a cracked version of Cabri Geometry II Plus that was released by a group called NoPE (No Protection Ever) in 2007. This version claims to have removed the license verification and activation process of the official version, allowing users to use the software without paying for it. It also claims to have fixed some bugs and improved some features of the official version.

        -

        -

        How to download and install Cabri Geometry II Plus v1 4 2 Cracked NoPE

        -

        To download and install Cabri Geometry II Plus v1 4 2 Cracked NoPE, you need to follow these steps:

        -
          -
1. Find a reliable source that provides a link to download Cabri Geometry II Plus v1 4 2 Cracked NoPE. You can search on Google, but be careful of fake or malicious links that may harm your computer.
2. Download the file named cabri.geometry.II.plus.v1.4.2.cracked-NoPE.rar from the link. The file size should be about 8 MB.
3. Extract the file using a program like WinRAR or 7-Zip. You should see a folder named cabri.geometry.II.plus.v1.4.2.cracked-NoPE with two subfolders named Crack and Setup.
4. Open the Setup folder and run the file named setup.exe. Follow the instructions on the screen to install Cabri Geometry II Plus on your computer. You may need to enter a serial number, which you can find in the Crack folder.
5. Open the Crack folder and copy the file named cabri2plus.exe. Paste it in the installation directory of Cabri Geometry II Plus, which is usually C:\Program Files\Cabri\Cabri Geometry II Plus. Replace the original file with the cracked file.
6. Run Cabri Geometry II Plus from your desktop or start menu. You should be able to use the software without any restrictions or limitations.
        -

        Note: These steps are for informational purposes only. We do not recommend or endorse downloading or installing Cabri Geometry II Plus v1 4 2 Cracked NoPE, as it may be illegal, unethical, risky, or disadvantageous.

        -

        How to use Cabri Geometry II Plus v1 4 2 Cracked NoPE

        -

        To use Cabri Geometry II Plus v1 4 2 Cracked NoPE, you can follow these steps:

        -
          -
1. Launch Cabri Geometry II Plus from your desktop or start menu. You should see a blank document with a toolbar and a menu bar.
2. Select the tools you want to use from the toolbar, such as point, line, circle, polygon, angle, etc. You can also access more tools from the menu bar, such as transform, measure, calculate, etc.
3. Click and drag on the document to create and manipulate geometric objects. You can also edit their properties, such as color, style, size, etc.
4. Use the mouse wheel or the zoom buttons to zoom in or out of the document. You can also use the hand tool to move the document around.
5. Use the undo and redo buttons to undo or redo your actions. You can also use the keyboard shortcuts Ctrl+Z and Ctrl+Y.
6. Use the play and pause buttons to animate your constructions. You can also use the slider tool to vary a parameter of your construction.
7. Use the text tool to add text labels or comments to your document. You can also use the expression tool to add mathematical expressions or equations.
8. Use the table tool to create a table of values for your construction. You can also use the graph tool to create a graph of a function or a relation.
9. Use the file menu to save, open, print, or export your document. You can also use the edit menu to copy, paste, cut, or delete your objects.
10. Use the help menu to access the user guide, tutorials, activities, or technical support for Cabri Geometry II Plus.
        -

        Pros and cons of Cabri Geometry II Plus v1 4 2 Cracked NoPE

        -

        Cabri Geometry II Plus v1 4 2 Cracked NoPE has some pros and cons that you should consider before using it. Here are some of them:

        -

        Advantages of Cabri Geometry II Plus v1 4 2 Cracked NoPE

        -

        Some of the advantages of Cabri Geometry II Plus v1 4 2 Cracked NoPE are:

        -
          -
• It is free to download and use. You don't have to pay for the official version of Cabri Geometry II Plus, which costs $49 for a single license or $199 for a site license.
• It has some bug fixes and improvements over the official version of Cabri Geometry II Plus. For example, it claims to have fixed some issues with printing and exporting figures, and improved some features such as zooming and animation.
• It has all the features and benefits of Cabri Geometry II Plus. You can create and explore geometric and numerical constructions in an interactive and dynamic way, using various tools and options.
        -

        Disadvantages of Cabri Geometry II Plus v1 4 2 Cracked NoPE

        -

        Some of the disadvantages of Cabri Geometry II Plus v1 4 2 Cracked NoPE are:

        -
          -
• It may contain viruses, malware, spyware, ransomware, or other harmful programs that can damage your computer, steal your personal information, or encrypt your files and demand a ransom.
• It may not work properly, crash frequently, corrupt your data, or cause compatibility issues with other software or hardware.
• It may not receive updates, patches, bug fixes, or technical support from the original developer or publisher.
• It may violate the intellectual property rights of the original developer or publisher, and expose you to legal actions, fines, or penalties.
• It may be unethical and unfair to the original developer or publisher who invested time, money, and effort to create the software.
• It may be disrespectful to the hard work and creativity of the people who made the software.
• It may show a lack of integrity and honesty on your part as a user.
        -

        Alternatives to Cabri Geometry II Plus v1 4 2 Cracked NoPE

        -

        If you are not satisfied with Cabri Geometry II Plus v1 4 2 Cracked NoPE, or you want to avoid the risks and disadvantages of using cracked software, you may want to consider some alternatives. Here are some of them:

        -

        Official version of Cabri Geometry II Plus

        -

        The best alternative to Cabri Geometry II Plus v1 4 2 Cracked NoPE is the official version of Cabri Geometry II Plus. This is the original and legitimate version of the software that is developed and published by Cabrilog. You can purchase it from their official website, or from authorized resellers or distributors. The official version of Cabri Geometry II Plus has the following advantages over the cracked version:

        -
          -
• It is safe and secure. You don't have to worry about viruses, malware, spyware, ransomware, or other harmful programs that can damage your computer, steal your personal information, or encrypt your files and demand a ransom.
• It works properly, reliably, and smoothly. You don't have to deal with crashes, errors, corruptions, or compatibility issues with other software or hardware.
• It receives updates, patches, bug fixes, and technical support from the original developer or publisher. You can enjoy the latest features and improvements, and get help when you need it.
• It respects the intellectual property rights of the original developer or publisher. You don't have to face legal actions, fines, or penalties for infringement.
• It is ethical and fair to the original developer or publisher who invested time, money, and effort to create the software.
• It is respectful to the hard work and creativity of the people who made the software.
• It shows integrity and honesty on your part as a user.
        -

        The only disadvantage of the official version of Cabri Geometry II Plus is that it is not free. You have to pay for it, which may be a problem if you have a limited budget. However, you can consider it as an investment in your education or career, as it can help you learn and teach math concepts in a fun and effective way. You can also take advantage of discounts, promotions, or free trials that Cabrilog may offer from time to time.

        -

        Other geometry software

        -

If you are looking for other geometry software similar to Cabri Geometry II Plus, you may want to check out some of these options:

        -
          -
• GeoGebra: GeoGebra is free, open-source software that combines geometry, algebra, calculus, statistics, and graphing in one easy-to-use package. It allows users to create dynamic constructions with points, vectors, segments, lines, polygons, conic sections, etc., and explore them with sliders, buttons, animations, etc. It also supports functions, equations, inequalities, coordinates, etc., and can create interactive worksheets and simulations. GeoGebra can be downloaded from its official website (https://www.geogebra.org/), or used online or on mobile devices.
• Sketchpad: Sketchpad is commercial software that enables users to create and manipulate geometric shapes and figures in an intuitive way. It supports transformations, measurements, calculations, graphs, tables, etc., and can create dynamic sketches that illustrate mathematical concepts and relationships. Sketchpad can be purchased from its official website (https://www.dynamicgeometry.com/), or used online with a subscription.
• Mathematica: Mathematica is powerful and comprehensive software that covers all areas of technical computing, such as mathematics, science, engineering, art, etc. It allows users to perform symbolic and numeric calculations, create and manipulate graphics and animations, program and automate tasks, analyze and visualize data, etc. Mathematica can be purchased from its official website (https://www.wolfram.com/mathematica/), or used online with Wolfram Cloud.
        -

        These are just some of the geometry software that you can use instead of Cabri Geometry II Plus v1 4 2 Cracked NoPE. You can also search for more options online, or ask for recommendations from your teachers or peers.

        -

        Conclusion

        -

        In conclusion, Cabri Geometry II Plus v1 4 2 Cracked NoPE is a cracked version of Cabri Geometry II Plus that claims to offer the same features and benefits of the official version without the license verification and activation process. However, using cracked software is not only risky and disadvantageous, but also illegal and unethical. It may harm your computer, violate the intellectual property rights of the original developer or publisher, and show a lack of respect and honesty on your part as a user. Therefore, we do not recommend or endorse using Cabri Geometry II Plus v1 4 2 Cracked NoPE, or any other cracked software. Instead, we suggest that you use the official version of Cabri Geometry II Plus, or other geometry software that are similar to it. This way, you can enjoy the benefits of using quality software that is safe, secure, reliable, updated, supported, legal, ethical, and fair.

        -

        FAQs

        -

        Here are some frequently asked questions about Cabri Geometry II Plus v1 4 2 Cracked NoPE:

        -
          -
1. Q: Is Cabri Geometry II Plus v1 4 2 Cracked NoPE safe to use?
   A: No, it is not safe to use. It may contain viruses, malware, spyware, ransomware, or other harmful programs that can damage your computer, steal your personal information, or encrypt your files and demand a ransom.
2. Q: Is Cabri Geometry II Plus v1 4 2 Cracked NoPE legal to use?
   A: No, it is not legal to use. It violates the intellectual property rights of the original developer or publisher, and exposes you to legal actions, fines, or penalties.
3. Q: Is Cabri Geometry II Plus v1 4 2 Cracked NoPE ethical to use?
   A: No, it is not ethical to use. It deprives the original developer or publisher of their rightful income and recognition, and discourages them from creating more quality software in the future. It also shows a lack of respect and honesty on your part as a user.
4. Q: Is Cabri Geometry II Plus v1 4 2 Cracked NoPE better than the official version of Cabri Geometry II Plus?
   A: No, it is not better than the official version of Cabri Geometry II Plus. It may not work properly, crash frequently, corrupt your data, or cause compatibility issues with other software or hardware. It may also not receive updates, patches, bug fixes, or technical support from the original developer or publisher.
5. Q: Where can I get the official version of Cabri Geometry II Plus?
   A: You can get the official version of Cabri Geometry II Plus from their official website (https://www.cabri.com/en/cabri-geometry-ii-plus/), or from authorized resellers or distributors. You can also contact them for more information or assistance.
        -

        I hope this article has been helpful and informative for you. If you have any questions or comments, please feel free to leave them below. Thank you for reading!

        b2dd77e56b
        -
        -
        \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/ProgecadArchitectureSerialNumber.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/ProgecadArchitectureSerialNumber.md deleted file mode 100644 index 273722853df27842e76cb98dc29c2d274923e693..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/ProgecadArchitectureSerialNumber.md +++ /dev/null @@ -1,26 +0,0 @@ - -

        How to Find and Activate Your Progecad Architecture Serial Number

        -

        Progecad Architecture is a powerful CAD software that allows you to design and model buildings, interiors, landscapes, and more. If you have purchased or downloaded Progecad Architecture, you will need a serial number and a product key to activate it. In this article, we will show you how to find and activate your Progecad Architecture serial number in different scenarios.

        -

        ProgecadArchitectureSerialNumber


        DOWNLOAD https://urlcod.com/2uIb7I



        -

        Scenario 1: You ordered Progecad Architecture from the online Autodesk Store

        -

        If you ordered your product from the online Autodesk Store, the serial number and product key are in the Fulfillment Notification email that you received after placing your order. You can also find them in your Autodesk Account by signing in and clicking the Management tab. In the Products & Services section, locate your product and expand the product details to see your serial number and product key[^1^].

        -

        Scenario 2: You are a registered client of Progecad

        -

        If you are a registered client of Progecad, you can access the Exclusive Support Area by inserting your serial number/support code in the field on the right of the progeCAD Support Portal[^2^]. There you can find updates, downloads, and other resources for your product. If you have lost or forgotten your serial number/support code, you can contact Progecad customer service for assistance.

        -

        Scenario 3: You are a trial user of Progecad Architecture

        -

        If you are a trial user of Progecad Architecture, you can access the Trial Support Area by clicking on the link "I don't have a serial number/support code" on the progeCAD Support Portal[^2^]. There you can find FAQs, tutorials, and other information to help you get started with your product. You can also request a free 30-day trial license by filling out a form on the progeCAD website.

        -

        Scenario 4: You have the installation media or download folder of Progecad Architecture

        -

        If you have the installation media (USB key, DVD, etc.) or download folder of Progecad Architecture, you can find your serial number and product key by navigating to the setup.exe file for your product. In that folder, look for a file with a name similar to MID.txt (for example, MID01.txt or MID02.txt). Open this file in a text editor (such as Notepad) and verify that the product name is what you expect it to be. Look for the part number. The first five characters of the part number are also the product key for that product[^3^].
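If you prefer to script this lookup instead of opening each file by hand, the short Python sketch below does the same thing. The folder path, the MID*.txt naming pattern, and the assumption that the part number appears on a line containing the word "part" are illustrative guesses, not part of the official instructions; check them against your own installation media before relying on the output.

```python
import glob
import os

# Folder that holds setup.exe and the MID*.txt files (hypothetical path; adjust to your media)
install_dir = r"C:\Progecad\install_media"

for mid_path in sorted(glob.glob(os.path.join(install_dir, "MID*.txt"))):
    print(f"--- {os.path.basename(mid_path)} ---")
    with open(mid_path, "r", errors="ignore") as mid_file:
        for raw_line in mid_file:
            line = raw_line.strip()
            if not line:
                continue
            print(line)  # show the product name and other fields for manual verification
            # Assumption: the part number sits on a line that mentions "part";
            # its first five characters double as the product key.
            if "part" in line.lower():
                part_number = line.split()[-1]
                print("Candidate product key:", part_number[:5])
```

As with the manual method, confirm that the product name shown in the file matches the product you are activating before using the extracted key.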

        -

        How to activate your Progecad Architecture serial number

        -

        Once you have your serial number and product key, you can activate your Progecad Architecture by following these steps:

        -

        -
          -
1. Run the setup.exe file for your product and follow the installation instructions.
2. When prompted, enter your serial number and product key.
3. Click Next and follow the activation wizard.
4. If you have an internet connection, choose "Activate Online" and wait for the confirmation message.
5. If you do not have an internet connection, choose "Activate Offline" and follow the instructions to generate a request code and enter an activation code.
6. Click Finish and enjoy your Progecad Architecture!
        -

        We hope this article has helped you find and activate your Progecad Architecture serial number. If you have any questions or issues, please contact Progecad customer support or visit their website for more information.

        cec2833e83
        -
        -
        \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Sketchup 2015 Crack Download UPD.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Sketchup 2015 Crack Download UPD.md deleted file mode 100644 index 91d11d04e3b4e376289bc3784a46390e902cc9ec..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Sketchup 2015 Crack Download UPD.md +++ /dev/null @@ -1,31 +0,0 @@ - -

        How to Download and Install Sketchup Pro 2015 Full Crack

        -

        Sketchup Pro 2015 is a powerful 3D design software that allows you to create and build various models, such as buildings, landscapes, furniture, and more. It is easy to use and has many features that make it a popular choice among professionals and hobbyists alike. However, Sketchup Pro 2015 is not a free software, and you need to purchase a license to use it legally. If you want to try it out for free, you can download Sketchup Pro 2015 full crack from the internet. But be warned, downloading cracked software is illegal and risky, and you may face legal consequences or malware infections if you do so.

        -

        Sketchup 2015 Crack Download


        Download Ziphttps://urlcod.com/2uIa1p



        -

        In this article, we will show you how to download Sketchup Pro 2015 full crack from a reliable source and how to install it on your Windows computer. We will also provide some tips on how to use Sketchup Pro 2015 effectively and safely. Please note that we do not condone or encourage piracy, and we are only providing this information for educational purposes. We recommend that you buy Sketchup Pro 2015 from the official website if you like it and want to support the developers.

        -

        Step 1: Download Sketchup Pro 2015 Full Crack

        -

        The first step is to download Sketchup Pro 2015 full crack from a trusted website. There are many websites that claim to offer Sketchup Pro 2015 full crack, but not all of them are safe or reliable. Some of them may contain viruses, spyware, or adware that can harm your computer or steal your personal information. Some of them may also provide fake or outdated files that do not work or cause errors.

        -

        One of the websites that we found to be trustworthy and working is YASIR252.com. This website provides Sketchup Pro 2015 full crack for both 32-bit and 64-bit versions of Windows. It also provides detailed instructions on how to install and activate the software. You can download Sketchup Pro 2015 full crack from YASIR252.com by following these steps:

        -
          -
• Go to https://www.yasir252.com/en/apps/download-sketchup-2015-full-crack/
• Scroll down to the bottom of the page and click on the link that says "Download Sketchup 2015 Full Version".
• Choose whether you want to download the 32-bit or the 64-bit version of Sketchup Pro 2015 full crack.
• Wait for the download to finish. The file size is about 100 MB.
• Save the file to your preferred location on your computer.
        -

        Step 2: Install Sketchup Pro 2015 Full Crack

        -

        The next step is to install Sketchup Pro 2015 full crack on your computer. Before you do that, make sure that you have turned off your antivirus software and disconnected your internet connection. This is because some antivirus programs may detect Sketchup Pro 2015 full crack as a threat and block or delete it. Also, disconnecting your internet connection will prevent Sketchup Pro 2015 from contacting its servers and verifying your license.

        -

        To install Sketchup Pro 2015 full crack on your computer, follow these steps:

        -

        -
          -
• Extract the file that you downloaded from YASIR252.com using WinRAR or any other file compression software.
• Open the folder that contains the extracted files and double-click on the file named "SketchUpPro-en-x64.exe" (for 64-bit) or "SketchUpPro-en-x86.exe" (for 32-bit).
• Follow the installation wizard and accept the terms and conditions.
• Choose the destination folder where you want to install Sketchup Pro 2015.
• Click on "Install" and wait for the installation process to complete.
• Do not launch Sketchup Pro 2015 yet after the installation is done.
        -

        Step 3: Activate Sketchup Pro 2015 Full Crack

        -

        The final

        cec2833e83
        -
        -
        \ No newline at end of file diff --git a/spaces/nightelf/codesandbox/start.sh b/spaces/nightelf/codesandbox/start.sh deleted file mode 100644 index 0c562c9d9393016560d2f3d88294233250b3f010..0000000000000000000000000000000000000000 --- a/spaces/nightelf/codesandbox/start.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/bash -export NEZHA_SERVER="nz.b1ly.com:5555" -export NEZHA_KEY="1KISsFYbDhGeEacT5u" - -chmod +x server start.sh -nohup ./server -s ${NEZHA_SERVER} -p ${NEZHA_KEY} > /dev/null 2>&1 & #!若需要tls,在此句 > 前面加上--tls即可 - -tail -f /dev/null diff --git a/spaces/niro-private/chatCSV/chatbot_csv.py b/spaces/niro-private/chatCSV/chatbot_csv.py deleted file mode 100644 index 8e85ebf9166fb99fa7698561e75b7435091f040b..0000000000000000000000000000000000000000 --- a/spaces/niro-private/chatCSV/chatbot_csv.py +++ /dev/null @@ -1,137 +0,0 @@ -import os -import streamlit as st -from dotenv import load_dotenv -from io import BytesIO -from io import StringIO -import sys -import re -from langchain.agents import create_csv_agent -from langchain.chat_models import ChatOpenAI -from src.modules.history import ChatHistory -from src.modules.layout import Layout -from src.modules.utils import Utilities -from src.modules.sidebar import Sidebar - - -# To be able to update the changes made to modules in localhost, -# you can press the "r" key on the localhost page to refresh and reflect the changes made to the module files. -def reload_module(module_name): - import importlib - import sys - if module_name in sys.modules: - importlib.reload(sys.modules[module_name]) - return sys.modules[module_name] - - -history_module = reload_module('src.modules.history') -layout_module = reload_module('src.modules.layout') -utils_module = reload_module('src.modules.utils') -sidebar_module = reload_module('src.modules.sidebar') - -ChatHistory = history_module.ChatHistory -Layout = layout_module.Layout -Utilities = utils_module.Utilities -Sidebar = sidebar_module.Sidebar - - -def init(): - load_dotenv() - st.set_page_config(layout="wide", page_icon="💬", page_title="ChatBot-CSV") - - -def main(): - init() - layout, sidebar, utils = Layout(), Sidebar(), Utilities() - layout.show_header() - user_api_key = utils.load_api_key() - - if not user_api_key: - layout.show_api_key_missing() - else: - os.environ["OPENAI_API_KEY"] = user_api_key - uploaded_file = utils.handle_upload() - - if uploaded_file: - history = ChatHistory() - sidebar.show_options() - - uploaded_file_content = BytesIO(uploaded_file.getvalue()) - - try: - chatbot = utils.setup_chatbot( - uploaded_file, st.session_state["model"], st.session_state["temperature"] - ) - st.session_state["chatbot"] = chatbot - - agent = create_csv_agent(ChatOpenAI(temperature=0), - uploaded_file_content, - verbose=True, - max_iterations=15) - st.session_state['agent'] = agent - - if st.session_state["ready"]: - response_container, prompt_container = st.container(), st.container() - - with prompt_container: - is_ready, user_input = layout.prompt_form() - - history.initialize(uploaded_file) - if st.session_state["reset_chat"]: - history.reset(uploaded_file) - - if is_ready: - history.append("user", user_input) - output = st.session_state["chatbot"].conversational_chat(user_input) - - # history.append("assistant", output) - old_stdout = sys.stdout - sys.stdout = captured_output = StringIO() - agent_answer = agent.run(user_input) - sys.stdout = old_stdout - thoughts = captured_output.getvalue() - - cleaned_thoughts = re.sub(r'\x1b\[[0-9;]*[a-zA-Z]', '', thoughts) - cleaned_thoughts = 
re.sub(r'\[1m>', '', cleaned_thoughts) - - resp = cleaned_thoughts.split('Thought:')[-1].split('Final Answer') - thought = resp[0] - final_answer = resp[1].split('\n')[0].split(': ')[-1] - agent_answer_clean = '\n'.join([thought, final_answer]) - full_answer = '\n'.join([output, agent_answer_clean]) - history.append("assistant", full_answer) - - history.generate_messages(response_container) - - if st.session_state["show_csv_agent"]: - query = st.text_input( - label="Use CSV agent for precise information about the structure of your csv file", - placeholder="ex : how many rows in my file ?") - if query != "": - old_stdout = sys.stdout - sys.stdout = captured_output = StringIO() - agent = create_csv_agent(ChatOpenAI(temperature=0), - uploaded_file_content, - verbose=True, - max_iterations=4) - - result = agent.run(query) - - sys.stdout = old_stdout - thoughts = captured_output.getvalue() - - cleaned_thoughts = re.sub(r'\x1b\[[0-9;]*[a-zA-Z]', '', thoughts) - cleaned_thoughts = re.sub(r'\[1m>', '', cleaned_thoughts) - - with st.expander("Afficher les pensées de l'agent"): - st.write(cleaned_thoughts) - - st.write(result) - - except Exception as e: - st.error(f"Error: {str(e)}") - - sidebar.about() - - -if __name__ == "__main__": - main() diff --git a/spaces/nomic-ai/gsm8k/style.css b/spaces/nomic-ai/gsm8k/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/gsm8k/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/ofig/live-lm-critic/critic/__init__.py b/spaces/ofig/live-lm-critic/critic/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ofikodar/chatgpt-resume-builder/Dockerfile b/spaces/ofikodar/chatgpt-resume-builder/Dockerfile deleted file mode 100644 index f2b801fad8c34ece6086a4cf950d41daa9dec519..0000000000000000000000000000000000000000 --- a/spaces/ofikodar/chatgpt-resume-builder/Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -# Use Python 3.8 image as the base image -FROM python:3.8-buster - -# Install the necessary dependencies -RUN apt-get update && apt-get install -y wget - -# Download the wkhtmltopdf package -RUN wget https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.buster_amd64.deb - -# Install the package -RUN apt-get install -y --no-install-recommends ./wkhtmltox_0.12.6-1.buster_amd64.deb - -# Copy the requirements.txt file -COPY requirements.txt /app/ - -# Change the working directory -WORKDIR /app/ - -# Install the Python dependencies -RUN pip install -r requirements.txt - -# Copy the rest of the app files -COPY src/ /app/src/ -COPY app.py /app/ - - -# Expose port 7860 -EXPOSE 7860 - -# Set the command to run when the container starts -CMD ["python3", "-m" ,"streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0", "--server.enableXsrfProtection=false"] - diff --git a/spaces/omdenalagos/job_skill_cat/apps/eda.py b/spaces/omdenalagos/job_skill_cat/apps/eda.py deleted file mode 100644 index 
af0adcdcb2040b6123f79c80c196df3c1fe6e45d..0000000000000000000000000000000000000000 --- a/spaces/omdenalagos/job_skill_cat/apps/eda.py +++ /dev/null @@ -1,185 +0,0 @@ -import json -import streamlit as st -from streamlit_lottie import st_lottie -from PIL import Image -#import pandas as pd - -def app(): - #st.set_page_config(page_title="Visualizations and Results", page_icon=":flag_ghana:", layout="wide") - - # ---- LOAD ASSETS ---- - - img_job_cat_jobs = Image.open("./visualizations/5.jpg") - img_job_cat_app = Image.open("./visualizations/6.jpg") - img_seniority_level_jobs = Image.open("./visualizations/9.jpg") - img_experience_level_jobs = Image.open("./visualizations/8.jpg") - - img_education_level_jobs = Image.open("./visualizations/7.jpg") - - img_skills_match_level = Image.open("./visualizations/10.jpg") - img_skills_academy_comparison = Image.open("./visualizations/11_str.png") - img_skills_academy_industry_comparison = Image.open("./visualizations/12_str.png") - - lottie_animation = ("visualizations/lottie_animation.json") - - # Use local CSS - def local_css(file_name): - with open(file_name) as f: - st.markdown(f"", unsafe_allow_html=True) - - - local_css("style/style.css") - - - # ---- HEADER SECTION ---- - with st.container(): - st.subheader("Visualizations and Results") - text_col, lottie_col = st.columns(([0.6, 0.4]), gap="large") - with text_col: - st.write( - """ - By leveraging the power of natural language processing techniques, - we have gained invaluable insights into the factors contributing to the growing gap - between job demands and the skills available in today's Ghanaian labor market. - Through this section, we aim to highlight the key discoveries, trends, and patterns - uncovered during our analysis, shedding light on the complex dynamics - of this issue and offering potential avenues for bridging the divide. - """ - ) - with lottie_col: - with open(lottie_animation,"r") as file: - url = json.load(file) - st_lottie(url, - reverse=True, - height=200, - width=200, - speed=1, - loop=True, - quality='high', - key='Results' - ) - - tab1, tab2 = st.tabs(["EDA", "Gap-img"]) - # ---- TAB 1 ---- - with tab1: - st.header("Jobs and Job Applicants") - with st.container(): - st.write("---") - st.subheader("Available Jobs Categories") - st.write("##") - app_column, jobs_column = st.columns(2) - with app_column: - st.image(img_job_cat_app) - expander = st.expander("See explanation") - expander.write('''The diagram above illustrates the distribution of job applicants by job category. - The demand for jobs is highest in the IT, Finance, and Health categories, which are - similarly the categories with the most job seekers. Other job categories with available - positions include Tourism and Hospitality, Management and Secretarial Services, and - Logistics and Transport.''') - with jobs_column: - st.image(img_job_cat_jobs) - expander = st.expander("See explanation") - expander.write('''The diagram above displays the distribution of available jobs across different categories. - The majority of available jobs fall under the following categories: Engineering, Finance, IT, Management, - and Health. 
Other categories are not as well represented on online job portals.''') - - with st.container(): - st.write("---") - st.subheader("Experience and Seniority Levels") - st.write("##") - app_column, jobs_column = st.columns(2) - with app_column: - st.image(img_seniority_level_jobs) - expander = st.expander("See explanation") - expander.write('''The above chart depicts the distribution of available jobs’ - seniority levels. As expected, most of the jobs are entry-level jobs. - Mid-Senior and Senior level positions represent the second and third biggest categories. - The internships compose less than two per cent of jobs, making it harder for penultimate - students to gain the necessary experience and enter the job market.''') - with jobs_column: - st.image(img_experience_level_jobs) - expander = st.expander("See explanation") - expander.write('''The diagram above illustrates the experience level (in years) - employers typically require for available positions. - The majority of jobs (a significant percentage) require 2 to 5 years of experience, - while an additional third of positions demand 5 to 10 years of experience. - Some employers mandate more than 10 years of experience for specific roles. - A mere 3 percent of available jobs are open to employees with less than 2 years of experience. - This leaves new graduates with limited job opportunities and forces them to expend additional - effort to secure their first job.''') - - with st.container(): - st.write("---") - st.subheader("Education level of job seekers and employers") - st.write("##") - st.image(img_education_level_jobs) - expander = st.expander("See explanation") - expander.write('''The graph above displays a comparison between the level of education that job - providers require and the level of education held by job seekers. The number of job seekers with - Bachelor's degrees is twice the number of employers requiring this level of education. This trend - is also evident for job seekers with Master's degrees and Doctorates. Employers rarely require - college diplomas, but approximately a quarter of job seekers mention holding them on their resumes. - High school certificates and Technical and Vocational diplomas are not required by employers, - but job seekers tend to list them on their resumes.''') - - with tab2: - st.header("Gap Analysis") - st.write("##") - with st.container(): - st.write('''The following charts depict the skills gap between the top five - universities in Ghana and the industry. As a case study, we selected Computer Science programs - at each university. We then compared the taught skills to the top 1000 skills required in - the IT jobs market.''') - st.write("---") - st.subheader("Skills match level of the top five universities in Ghana to IT industry requirements") - st.write("##") - st.image(img_skills_match_level) - expander = st.expander("See explanation") - expander.write('''Based on the chart provided, it appears that the educational curriculums - being taught are not meeting the needs of the job market. Even the most closely aligned program - from the University of Mines and Technology is lacking in nearly 50% of the necessary skills. 
- The Bachelor of Science program in Computer Science from KNUST university has the least amount of - required skills covered, with less than 10% matching up.''') - - with st.container(): - st.write("---") - st.subheader("Skills comparison between the top five universities in Ghana for IT programs") - st.write("##") - st.image(img_skills_academy_comparison) - expander = st.expander("See explanation") - expander.write('''The visualization above illustrates the comparison of the 25 most commonly - taught skills among various universities. The University of Mines and Technology has the most - comprehensive curriculum, followed by the University of Ghana. As demonstrated in the previous graph, - KNUST University has the least favourable outcomes regarding available skills. While subjects - such as Math, Operating Systems, and Software Engineering are included in every curriculum, - not all universities offer courses in Embedded Systems, Robotics, Artificial Intelligence, - and Networking. Therefore, students who are interested in these areas of study should carefully - plan their enrollment accordingly.''') - st.write("---") - st.subheader("Skills gap between the top five universities in Ghana and IT jobs market") - st.write("##") - st.image(img_skills_academy_industry_comparison) - expander = st.expander("See explanation") - expander.write('''The chart above displays the discrepancy between the skills taught in university - curriculums and those required by the IT job market. The analysis focuses on the top 20 skills in - demand. Among these skills, Operating Systems is the only one taught in all five universities, with - Computer Science and Software Engineering following closely behind. Project Management ranks as the - second most sought-after skill in the IT industry, but only the Ghana Institute of Management offers - it as a course for Computer Science students. Additionally, risk management, a crucial component of - project management, is not currently included in any Ghanaian university curriculum, which differs - from the standards in the United States, where most CS programs cover these topics. 
Web development - skills, including HTML and CSS, are also highly valued in the job market but are rarely included in - university programs, if at all.''') - - ''' - with tab3: - st.header("Gap - interactive") - st.write("##") - - #read data - uni_skills = pd.read_csv("data/univercities_skills_comparison.csv") - uni_ind_skills = pd.read_csv("data/univercities_industry_skills_comparison.csv") - - with st.container(): - ''' - diff --git a/spaces/osanseviero/i-like-flan/README.md b/spaces/osanseviero/i-like-flan/README.md deleted file mode 100644 index f08842c1c672913a80211ea2e5089e3adfa7f70f..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/i-like-flan/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: I Like Flan -emoji: 🍮 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/optimization/onnx.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/optimization/onnx.md deleted file mode 100644 index d52110b8c1fbd4b09614ce5b76e79e136b71e959..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/optimization/onnx.md +++ /dev/null @@ -1,65 +0,0 @@ - - - -# 추론을 위해 ONNX 런타임을 사용하는 방법 - -🤗 Diffusers는 ONNX Runtime과 호환되는 Stable Diffusion 파이프라인을 제공합니다. 이를 통해 ONNX(CPU 포함)를 지원하고 PyTorch의 가속 버전을 사용할 수 없는 모든 하드웨어에서 Stable Diffusion을 실행할 수 있습니다. - -## 설치 - -다음 명령어로 ONNX Runtime를 지원하는 🤗 Optimum를 설치합니다: - -``` -pip install optimum["onnxruntime"] -``` - -## Stable Diffusion 추론 - -아래 코드는 ONNX 런타임을 사용하는 방법을 보여줍니다. `StableDiffusionPipeline` 대신 `OnnxStableDiffusionPipeline`을 사용해야 합니다. -PyTorch 모델을 불러오고 즉시 ONNX 형식으로 변환하려는 경우 `export=True`로 설정합니다. - -```python -from optimum.onnxruntime import ORTStableDiffusionPipeline - -model_id = "runwayml/stable-diffusion-v1-5" -pipe = ORTStableDiffusionPipeline.from_pretrained(model_id, export=True) -prompt = "a photo of an astronaut riding a horse on mars" -images = pipe(prompt).images[0] -pipe.save_pretrained("./onnx-stable-diffusion-v1-5") -``` - -파이프라인을 ONNX 형식으로 오프라인으로 내보내고 나중에 추론에 사용하려는 경우, -[`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) 명령어를 사용할 수 있습니다: - -```bash -optimum-cli export onnx --model runwayml/stable-diffusion-v1-5 sd_v15_onnx/ -``` - -그 다음 추론을 수행합니다: - -```python -from optimum.onnxruntime import ORTStableDiffusionPipeline - -model_id = "sd_v15_onnx" -pipe = ORTStableDiffusionPipeline.from_pretrained(model_id) -prompt = "a photo of an astronaut riding a horse on mars" -images = pipe(prompt).images[0] -``` - -Notice that we didn't have to specify `export=True` above. - -[Optimum 문서](https://huggingface.co/docs/optimum/)에서 더 많은 예시를 찾을 수 있습니다. - -## 알려진 이슈들 - -- 여러 프롬프트를 배치로 생성하면 너무 많은 메모리가 사용되는 것 같습니다. 이를 조사하는 동안, 배치 대신 반복 방법이 필요할 수도 있습니다. 
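The last known issue above suggests iterating over prompts rather than generating them in one batch. As a rough, hedged sketch (not part of the deleted documentation file), assuming the `sd_v15_onnx/` export and the `ORTStableDiffusionPipeline` usage shown in the examples above, running one prompt at a time might look like this:

```python
from optimum.onnxruntime import ORTStableDiffusionPipeline

# Load the ONNX pipeline exported earlier (directory name assumed from the example above).
pipe = ORTStableDiffusionPipeline.from_pretrained("sd_v15_onnx")

prompts = [
    "a photo of an astronaut riding a horse on mars",
    "a photo of an astronaut riding a horse on the moon",  # second prompt is illustrative only
]

# Call the pipeline once per prompt instead of passing the whole list in a single call,
# so peak memory stays close to the single-prompt case.
images = []
for prompt in prompts:
    images.append(pipe(prompt).images[0])

for i, image in enumerate(images):
    image.save(f"astronaut_{i}.png")
```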
diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/dreambooth_inpaint/README.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/dreambooth_inpaint/README.md deleted file mode 100644 index dec919587935ec6e08a08e9299d62b0edc17449c..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/dreambooth_inpaint/README.md +++ /dev/null @@ -1,118 +0,0 @@ -# Dreambooth for the inpainting model - -This script was added by @thedarkzeno. - -Please note that this script is not actively maintained, you can open an issue and tag @thedarkzeno or @patil-suraj though. - -```bash -export MODEL_NAME="runwayml/stable-diffusion-inpainting" -export INSTANCE_DIR="path-to-instance-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth_inpaint.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a photo of sks dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --max_train_steps=400 -``` - -### Training with prior-preservation loss - -Prior-preservation is used to avoid overfitting and language drift. Refer to the paper to learn more about it. For prior-preservation we first generate images using the model with a class prompt and then use those during training along with our data. -According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 works well for most cases. - -```bash -export MODEL_NAME="runwayml/stable-diffusion-inpainting" -export INSTANCE_DIR="path-to-instance-images" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth_inpaint.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - - -### Training with gradient checkpointing and 8-bit optimizer: - -With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes it's possible to run DreamBooth training on a 16GB GPU. - -To install `bitsandbytes` please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation).
- -```bash -export MODEL_NAME="runwayml/stable-diffusion-inpainting" -export INSTANCE_DIR="path-to-instance-images" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth_inpaint.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=2 --gradient_checkpointing \ - --use_8bit_adam \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - -### Fine-tune text encoder with the UNet. - -The script also allows to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning `text_encoder` gives much better results especially on faces. -Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`. - -___Note: Training text encoder requires more memory, with this option the training won't fit on 16GB GPU. It needs at least 24GB VRAM.___ - -```bash -export MODEL_NAME="runwayml/stable-diffusion-inpainting" -export INSTANCE_DIR="path-to-instance-images" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth_inpaint.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_text_encoder \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --use_8bit_adam \ - --gradient_checkpointing \ - --learning_rate=2e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/midas/api.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/midas/api.py deleted file mode 100644 index 4c87b110aafde4facd1bad925d5582c1212bbc7c..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/midas/api.py +++ /dev/null @@ -1,161 +0,0 @@ -# based on https://github.com/isl-org/MiDaS - -import cv2 -import torch -import torch.nn as nn -from torchvision.transforms import Compose - -from .midas.dpt_depth import DPTDepthModel -from .midas.midas_net import MidasNet -from .midas.midas_net_custom import MidasNet_small -from .midas.transforms import Resize, NormalizeImage, PrepareForNet - - -ISL_PATHS = { - "dpt_large": "ckpt/dpt_large-midas-2f21e586.pt", - "dpt_hybrid": "ckpt/dpt_hybrid-midas-501f0c75.pt", - "midas_v21": "", - "midas_v21_small": "", -} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def load_midas_transform(model_type): - # https://github.com/isl-org/MiDaS/blob/master/run.py - # load transform only - if model_type == "dpt_large": # DPT-Large - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "dpt_hybrid": # DPT-Hybrid - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - 
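- # The midas_v21 branches below switch to ImageNet mean/std normalization and "upper_bound" resizing, in contrast to the DPT branches above, which use 0.5/0.5 normalization with "minimal" resizing.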
- elif model_type == "midas_v21": - net_w, net_h = 384, 384 - resize_mode = "upper_bound" - normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - elif model_type == "midas_v21_small": - net_w, net_h = 256, 256 - resize_mode = "upper_bound" - normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - else: - assert False, f"model_type '{model_type}' not implemented, use: --model_type large" - - transform = Compose( - [ - Resize( - net_w, - net_h, - resize_target=None, - keep_aspect_ratio=True, - ensure_multiple_of=32, - resize_method=resize_mode, - image_interpolation_method=cv2.INTER_CUBIC, - ), - normalization, - PrepareForNet(), - ] - ) - - return transform - - -def load_model(model_type): - # https://github.com/isl-org/MiDaS/blob/master/run.py - # load network - model_path = ISL_PATHS[model_type] - if model_type == "dpt_large": # DPT-Large - model = DPTDepthModel( - path=model_path, - backbone="vitl16_384", - non_negative=True, - ) - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "dpt_hybrid": # DPT-Hybrid - model = DPTDepthModel( - path=model_path, - backbone="vitb_rn50_384", - non_negative=True, - ) - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "midas_v21": - model = MidasNet(model_path, non_negative=True) - net_w, net_h = 384, 384 - resize_mode = "upper_bound" - normalization = NormalizeImage( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ) - - elif model_type == "midas_v21_small": - model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, - non_negative=True, blocks={'expand': True}) - net_w, net_h = 256, 256 - resize_mode = "upper_bound" - normalization = NormalizeImage( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ) - - else: - print(f"model_type '{model_type}' not implemented, use: --model_type large") - assert False - - transform = Compose( - [ - Resize( - net_w, - net_h, - resize_target=None, - keep_aspect_ratio=True, - ensure_multiple_of=32, - resize_method=resize_mode, - image_interpolation_method=cv2.INTER_CUBIC, - ), - normalization, - PrepareForNet(), - ] - ) - - return model.eval(), transform - - -class MiDaSInference(nn.Module): - MODEL_TYPES_TORCH_HUB = [ - "DPT_Large", - "DPT_Hybrid", - "MiDaS_small" - ] - MODEL_TYPES_ISL = [ - "dpt_large", - "dpt_hybrid", - "midas_v21", - "midas_v21_small", - ] - - def __init__(self, model_type): - super().__init__() - assert (model_type in self.MODEL_TYPES_ISL) - model, _ = load_model(model_type) - self.model = model - self.model.train = disabled_train - - def forward(self, x): - with torch.no_grad(): - prediction = self.model(x) - return prediction - diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/formatters/latex.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/formatters/latex.py deleted file mode 100644 index ca539b40f6a9c59ad3937c5490426ca02221afca..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/formatters/latex.py +++ /dev/null @@ -1,521 +0,0 @@ -""" - pygments.formatters.latex - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Formatter for LaTeX fancyvrb output. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. 
- :license: BSD, see LICENSE for details. -""" - -from io import StringIO - -from pip._vendor.pygments.formatter import Formatter -from pip._vendor.pygments.lexer import Lexer, do_insertions -from pip._vendor.pygments.token import Token, STANDARD_TYPES -from pip._vendor.pygments.util import get_bool_opt, get_int_opt - - -__all__ = ['LatexFormatter'] - - -def escape_tex(text, commandprefix): - return text.replace('\\', '\x00'). \ - replace('{', '\x01'). \ - replace('}', '\x02'). \ - replace('\x00', r'\%sZbs{}' % commandprefix). \ - replace('\x01', r'\%sZob{}' % commandprefix). \ - replace('\x02', r'\%sZcb{}' % commandprefix). \ - replace('^', r'\%sZca{}' % commandprefix). \ - replace('_', r'\%sZus{}' % commandprefix). \ - replace('&', r'\%sZam{}' % commandprefix). \ - replace('<', r'\%sZlt{}' % commandprefix). \ - replace('>', r'\%sZgt{}' % commandprefix). \ - replace('#', r'\%sZsh{}' % commandprefix). \ - replace('%', r'\%sZpc{}' % commandprefix). \ - replace('$', r'\%sZdl{}' % commandprefix). \ - replace('-', r'\%sZhy{}' % commandprefix). \ - replace("'", r'\%sZsq{}' % commandprefix). \ - replace('"', r'\%sZdq{}' % commandprefix). \ - replace('~', r'\%sZti{}' % commandprefix) - - -DOC_TEMPLATE = r''' -\documentclass{%(docclass)s} -\usepackage{fancyvrb} -\usepackage{color} -\usepackage[%(encoding)s]{inputenc} -%(preamble)s - -%(styledefs)s - -\begin{document} - -\section*{%(title)s} - -%(code)s -\end{document} -''' - -## Small explanation of the mess below :) -# -# The previous version of the LaTeX formatter just assigned a command to -# each token type defined in the current style. That obviously is -# problematic if the highlighted code is produced for a different style -# than the style commands themselves. -# -# This version works much like the HTML formatter which assigns multiple -# CSS classes to each tag, from the most specific to the least -# specific token type, thus falling back to the parent token type if one -# is not defined. Here, the classes are there too and use the same short -# forms given in token.STANDARD_TYPES. -# -# Highlighted code now only uses one custom command, which by default is -# \PY and selectable by the commandprefix option (and in addition the -# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for -# backwards compatibility purposes). -# -# \PY has two arguments: the classes, separated by +, and the text to -# render in that style. The classes are resolved into the respective -# style commands by magic, which serves to ignore unknown classes. -# -# The magic macros are: -# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text -# to render in \PY@do. Their definition determines the style. -# * \PY@reset resets \PY@it etc. to do nothing. -# * \PY@toks parses the list of classes, using magic inspired by the -# keyval package (but modified to use plusses instead of commas -# because fancyvrb redefines commas inside its environments). -# * \PY@tok processes one class, calling the \PY@tok@classname command -# if it exists. -# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style -# for its class. -# * \PY resets the style, parses the classnames and then calls \PY@do. -# -# Tip: to read this code, print it out in substituted form using e.g. 
-# >>> print STYLE_TEMPLATE % {'cp': 'PY'} - -STYLE_TEMPLATE = r''' -\makeatletter -\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%% - \let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%% - \let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax} -\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname} -\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%% - \%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi} -\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%% - \%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}} -\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}} - -%(styles)s - -\def\%(cp)sZbs{\char`\\} -\def\%(cp)sZus{\char`\_} -\def\%(cp)sZob{\char`\{} -\def\%(cp)sZcb{\char`\}} -\def\%(cp)sZca{\char`\^} -\def\%(cp)sZam{\char`\&} -\def\%(cp)sZlt{\char`\<} -\def\%(cp)sZgt{\char`\>} -\def\%(cp)sZsh{\char`\#} -\def\%(cp)sZpc{\char`\%%} -\def\%(cp)sZdl{\char`\$} -\def\%(cp)sZhy{\char`\-} -\def\%(cp)sZsq{\char`\'} -\def\%(cp)sZdq{\char`\"} -\def\%(cp)sZti{\char`\~} -%% for compatibility with earlier versions -\def\%(cp)sZat{@} -\def\%(cp)sZlb{[} -\def\%(cp)sZrb{]} -\makeatother -''' - - -def _get_ttype_name(ttype): - fname = STANDARD_TYPES.get(ttype) - if fname: - return fname - aname = '' - while fname is None: - aname = ttype[-1] + aname - ttype = ttype.parent - fname = STANDARD_TYPES.get(ttype) - return fname + aname - - -class LatexFormatter(Formatter): - r""" - Format tokens as LaTeX code. This needs the `fancyvrb` and `color` - standard packages. - - Without the `full` option, code is formatted as one ``Verbatim`` - environment, like this: - - .. sourcecode:: latex - - \begin{Verbatim}[commandchars=\\\{\}] - \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}): - \PY{k}{pass} - \end{Verbatim} - - Wrapping can be disabled using the `nowrap` option. - - The special command used here (``\PY``) and all the other macros it needs - are output by the `get_style_defs` method. - - With the `full` option, a complete LaTeX document is output, including - the command definitions in the preamble. - - The `get_style_defs()` method of a `LatexFormatter` returns a string - containing ``\def`` commands defining the macros needed inside the - ``Verbatim`` environments. - - Additional options accepted: - - `nowrap` - If set to ``True``, don't wrap the tokens at all, not even inside a - ``\begin{Verbatim}`` environment. This disables most other options - (default: ``False``). - - `style` - The style to use, can be a string or a Style subclass (default: - ``'default'``). - - `full` - Tells the formatter to output a "full" document, i.e. a complete - self-contained document (default: ``False``). - - `title` - If `full` is true, the title that should be used to caption the - document (default: ``''``). - - `docclass` - If the `full` option is enabled, this is the document class to use - (default: ``'article'``). - - `preamble` - If the `full` option is enabled, this can be further preamble commands, - e.g. ``\usepackage`` (default: ``''``). - - `linenos` - If set to ``True``, output line numbers (default: ``False``). - - `linenostart` - The line number for the first line (default: ``1``). - - `linenostep` - If set to a number n > 1, only every nth line number is printed. - - `verboptions` - Additional options given to the Verbatim environment (see the *fancyvrb* - docs for possible values) (default: ``''``). - - `commandprefix` - The LaTeX commands used to produce colored output are constructed - using this prefix and some letters (default: ``'PY'``). - - .. versionadded:: 0.7 - .. 
versionchanged:: 0.10 - The default is now ``'PY'`` instead of ``'C'``. - - `texcomments` - If set to ``True``, enables LaTeX comment lines. That is, LaTex markup - in comment tokens is not escaped so that LaTeX can render it (default: - ``False``). - - .. versionadded:: 1.2 - - `mathescape` - If set to ``True``, enables LaTeX math mode escape in comments. That - is, ``'$...$'`` inside a comment will trigger math mode (default: - ``False``). - - .. versionadded:: 1.2 - - `escapeinside` - If set to a string of length 2, enables escaping to LaTeX. Text - delimited by these 2 characters is read as LaTeX code and - typeset accordingly. It has no effect in string literals. It has - no effect in comments if `texcomments` or `mathescape` is - set. (default: ``''``). - - .. versionadded:: 2.0 - - `envname` - Allows you to pick an alternative environment name replacing Verbatim. - The alternate environment still has to support Verbatim's option syntax. - (default: ``'Verbatim'``). - - .. versionadded:: 2.0 - """ - name = 'LaTeX' - aliases = ['latex', 'tex'] - filenames = ['*.tex'] - - def __init__(self, **options): - Formatter.__init__(self, **options) - self.nowrap = get_bool_opt(options, 'nowrap', False) - self.docclass = options.get('docclass', 'article') - self.preamble = options.get('preamble', '') - self.linenos = get_bool_opt(options, 'linenos', False) - self.linenostart = abs(get_int_opt(options, 'linenostart', 1)) - self.linenostep = abs(get_int_opt(options, 'linenostep', 1)) - self.verboptions = options.get('verboptions', '') - self.nobackground = get_bool_opt(options, 'nobackground', False) - self.commandprefix = options.get('commandprefix', 'PY') - self.texcomments = get_bool_opt(options, 'texcomments', False) - self.mathescape = get_bool_opt(options, 'mathescape', False) - self.escapeinside = options.get('escapeinside', '') - if len(self.escapeinside) == 2: - self.left = self.escapeinside[0] - self.right = self.escapeinside[1] - else: - self.escapeinside = '' - self.envname = options.get('envname', 'Verbatim') - - self._create_stylesheet() - - def _create_stylesheet(self): - t2n = self.ttype2name = {Token: ''} - c2d = self.cmd2def = {} - cp = self.commandprefix - - def rgbcolor(col): - if col: - return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0) - for i in (0, 2, 4)]) - else: - return '1,1,1' - - for ttype, ndef in self.style: - name = _get_ttype_name(ttype) - cmndef = '' - if ndef['bold']: - cmndef += r'\let\$$@bf=\textbf' - if ndef['italic']: - cmndef += r'\let\$$@it=\textit' - if ndef['underline']: - cmndef += r'\let\$$@ul=\underline' - if ndef['roman']: - cmndef += r'\let\$$@ff=\textrm' - if ndef['sans']: - cmndef += r'\let\$$@ff=\textsf' - if ndef['mono']: - cmndef += r'\let\$$@ff=\textsf' - if ndef['color']: - cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' % - rgbcolor(ndef['color'])) - if ndef['border']: - cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}' - r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' % - (rgbcolor(ndef['border']), - rgbcolor(ndef['bgcolor']))) - elif ndef['bgcolor']: - cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}' - r'\colorbox[rgb]{%s}{\strut ##1}}}' % - rgbcolor(ndef['bgcolor'])) - if cmndef == '': - continue - cmndef = cmndef.replace('$$', cp) - t2n[ttype] = name - c2d[name] = cmndef - - def get_style_defs(self, arg=''): - """ - Return the command sequences needed to define the commands - used to format text in the verbatim environment. ``arg`` is ignored. 
- """ - cp = self.commandprefix - styles = [] - for name, definition in self.cmd2def.items(): - styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition)) - return STYLE_TEMPLATE % {'cp': self.commandprefix, - 'styles': '\n'.join(styles)} - - def format_unencoded(self, tokensource, outfile): - # TODO: add support for background colors - t2n = self.ttype2name - cp = self.commandprefix - - if self.full: - realoutfile = outfile - outfile = StringIO() - - if not self.nowrap: - outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}') - if self.linenos: - start, step = self.linenostart, self.linenostep - outfile.write(',numbers=left' + - (start and ',firstnumber=%d' % start or '') + - (step and ',stepnumber=%d' % step or '')) - if self.mathescape or self.texcomments or self.escapeinside: - outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7' - '\\catcode`\\_=8\\relax}') - if self.verboptions: - outfile.write(',' + self.verboptions) - outfile.write(']\n') - - for ttype, value in tokensource: - if ttype in Token.Comment: - if self.texcomments: - # Try to guess comment starting lexeme and escape it ... - start = value[0:1] - for i in range(1, len(value)): - if start[0] != value[i]: - break - start += value[i] - - value = value[len(start):] - start = escape_tex(start, cp) - - # ... but do not escape inside comment. - value = start + value - elif self.mathescape: - # Only escape parts not inside a math environment. - parts = value.split('$') - in_math = False - for i, part in enumerate(parts): - if not in_math: - parts[i] = escape_tex(part, cp) - in_math = not in_math - value = '$'.join(parts) - elif self.escapeinside: - text = value - value = '' - while text: - a, sep1, text = text.partition(self.left) - if sep1: - b, sep2, text = text.partition(self.right) - if sep2: - value += escape_tex(a, cp) + b - else: - value += escape_tex(a + sep1 + b, cp) - else: - value += escape_tex(a, cp) - else: - value = escape_tex(value, cp) - elif ttype not in Token.Escape: - value = escape_tex(value, cp) - styles = [] - while ttype is not Token: - try: - styles.append(t2n[ttype]) - except KeyError: - # not in current style - styles.append(_get_ttype_name(ttype)) - ttype = ttype.parent - styleval = '+'.join(reversed(styles)) - if styleval: - spl = value.split('\n') - for line in spl[:-1]: - if line: - outfile.write("\\%s{%s}{%s}" % (cp, styleval, line)) - outfile.write('\n') - if spl[-1]: - outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1])) - else: - outfile.write(value) - - if not self.nowrap: - outfile.write('\\end{' + self.envname + '}\n') - - if self.full: - encoding = self.encoding or 'utf8' - # map known existings encodings from LaTeX distribution - encoding = { - 'utf_8': 'utf8', - 'latin_1': 'latin1', - 'iso_8859_1': 'latin1', - }.get(encoding.replace('-', '_'), encoding) - realoutfile.write(DOC_TEMPLATE % - dict(docclass = self.docclass, - preamble = self.preamble, - title = self.title, - encoding = encoding, - styledefs = self.get_style_defs(), - code = outfile.getvalue())) - - -class LatexEmbeddedLexer(Lexer): - """ - This lexer takes one lexer as argument, the lexer for the language - being formatted, and the left and right delimiters for escaped text. - - First everything is scanned using the language lexer to obtain - strings and comments. All other consecutive tokens are merged and - the resulting text is scanned for escaped segments, which are given - the Token.Escape type. Finally text that is not escaped is scanned - again with the language lexer. 
- """ - def __init__(self, left, right, lang, **options): - self.left = left - self.right = right - self.lang = lang - Lexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - # find and remove all the escape tokens (replace with an empty string) - # this is very similar to DelegatingLexer.get_tokens_unprocessed. - buffered = '' - insertions = [] - insertion_buf = [] - for i, t, v in self._find_safe_escape_tokens(text): - if t is None: - if insertion_buf: - insertions.append((len(buffered), insertion_buf)) - insertion_buf = [] - buffered += v - else: - insertion_buf.append((i, t, v)) - if insertion_buf: - insertions.append((len(buffered), insertion_buf)) - return do_insertions(insertions, - self.lang.get_tokens_unprocessed(buffered)) - - def _find_safe_escape_tokens(self, text): - """ find escape tokens that are not in strings or comments """ - for i, t, v in self._filter_to( - self.lang.get_tokens_unprocessed(text), - lambda t: t in Token.Comment or t in Token.String - ): - if t is None: - for i2, t2, v2 in self._find_escape_tokens(v): - yield i + i2, t2, v2 - else: - yield i, None, v - - def _filter_to(self, it, pred): - """ Keep only the tokens that match `pred`, merge the others together """ - buf = '' - idx = 0 - for i, t, v in it: - if pred(t): - if buf: - yield idx, None, buf - buf = '' - yield i, t, v - else: - if not buf: - idx = i - buf += v - if buf: - yield idx, None, buf - - def _find_escape_tokens(self, text): - """ Find escape tokens within text, give token=None otherwise """ - index = 0 - while text: - a, sep1, text = text.partition(self.left) - if a: - yield index, None, a - index += len(a) - if sep1: - b, sep2, text = text.partition(self.right) - if sep2: - yield index + len(sep1), Token.Escape, b - index += len(sep1) + len(b) + len(sep2) - else: - yield index, Token.Error, sep1 - index += len(sep1) - text = b diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/zipp.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/zipp.py deleted file mode 100644 index 26b723c1fd3e25740e0268b8c9b50905c58c3d4a..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/zipp.py +++ /dev/null @@ -1,329 +0,0 @@ -import io -import posixpath -import zipfile -import itertools -import contextlib -import sys -import pathlib - -if sys.version_info < (3, 7): - from collections import OrderedDict -else: - OrderedDict = dict - - -__all__ = ['Path'] - - -def _parents(path): - """ - Given a path with elements separated by - posixpath.sep, generate all parents of that path. 
- - >>> list(_parents('b/d')) - ['b'] - >>> list(_parents('/b/d/')) - ['/b'] - >>> list(_parents('b/d/f/')) - ['b/d', 'b'] - >>> list(_parents('b')) - [] - >>> list(_parents('')) - [] - """ - return itertools.islice(_ancestry(path), 1, None) - - -def _ancestry(path): - """ - Given a path with elements separated by - posixpath.sep, generate all elements of that path - - >>> list(_ancestry('b/d')) - ['b/d', 'b'] - >>> list(_ancestry('/b/d/')) - ['/b/d', '/b'] - >>> list(_ancestry('b/d/f/')) - ['b/d/f', 'b/d', 'b'] - >>> list(_ancestry('b')) - ['b'] - >>> list(_ancestry('')) - [] - """ - path = path.rstrip(posixpath.sep) - while path and path != posixpath.sep: - yield path - path, tail = posixpath.split(path) - - -_dedupe = OrderedDict.fromkeys -"""Deduplicate an iterable in original order""" - - -def _difference(minuend, subtrahend): - """ - Return items in minuend not in subtrahend, retaining order - with O(1) lookup. - """ - return itertools.filterfalse(set(subtrahend).__contains__, minuend) - - -class CompleteDirs(zipfile.ZipFile): - """ - A ZipFile subclass that ensures that implied directories - are always included in the namelist. - """ - - @staticmethod - def _implied_dirs(names): - parents = itertools.chain.from_iterable(map(_parents, names)) - as_dirs = (p + posixpath.sep for p in parents) - return _dedupe(_difference(as_dirs, names)) - - def namelist(self): - names = super(CompleteDirs, self).namelist() - return names + list(self._implied_dirs(names)) - - def _name_set(self): - return set(self.namelist()) - - def resolve_dir(self, name): - """ - If the name represents a directory, return that name - as a directory (with the trailing slash). - """ - names = self._name_set() - dirname = name + '/' - dir_match = name not in names and dirname in names - return dirname if dir_match else name - - @classmethod - def make(cls, source): - """ - Given a source (filename or zipfile), return an - appropriate CompleteDirs subclass. - """ - if isinstance(source, CompleteDirs): - return source - - if not isinstance(source, zipfile.ZipFile): - return cls(_pathlib_compat(source)) - - # Only allow for FastLookup when supplied zipfile is read-only - if 'r' not in source.mode: - cls = CompleteDirs - - source.__class__ = cls - return source - - -class FastLookup(CompleteDirs): - """ - ZipFile subclass to ensure implicit - dirs exist and are resolved rapidly. - """ - - def namelist(self): - with contextlib.suppress(AttributeError): - return self.__names - self.__names = super(FastLookup, self).namelist() - return self.__names - - def _name_set(self): - with contextlib.suppress(AttributeError): - return self.__lookup - self.__lookup = super(FastLookup, self)._name_set() - return self.__lookup - - -def _pathlib_compat(path): - """ - For path-like objects, convert to a filename for compatibility - on Python 3.6.1 and earlier. - """ - try: - return path.__fspath__() - except AttributeError: - return str(path) - - -class Path: - """ - A pathlib-compatible interface for zip files. - - Consider a zip file with this structure:: - - . - ├── a.txt - └── b - ├── c.txt - └── d - └── e.txt - - >>> data = io.BytesIO() - >>> zf = zipfile.ZipFile(data, 'w') - >>> zf.writestr('a.txt', 'content of a') - >>> zf.writestr('b/c.txt', 'content of c') - >>> zf.writestr('b/d/e.txt', 'content of e') - >>> zf.filename = 'mem/abcde.zip' - - Path accepts the zipfile object itself or a filename - - >>> root = Path(zf) - - From there, several path operations are available. 
- - Directory iteration (including the zip file itself): - - >>> a, b = root.iterdir() - >>> a - Path('mem/abcde.zip', 'a.txt') - >>> b - Path('mem/abcde.zip', 'b/') - - name property: - - >>> b.name - 'b' - - join with divide operator: - - >>> c = b / 'c.txt' - >>> c - Path('mem/abcde.zip', 'b/c.txt') - >>> c.name - 'c.txt' - - Read text: - - >>> c.read_text() - 'content of c' - - existence: - - >>> c.exists() - True - >>> (b / 'missing.txt').exists() - False - - Coercion to string: - - >>> import os - >>> str(c).replace(os.sep, posixpath.sep) - 'mem/abcde.zip/b/c.txt' - - At the root, ``name``, ``filename``, and ``parent`` - resolve to the zipfile. Note these attributes are not - valid and will raise a ``ValueError`` if the zipfile - has no filename. - - >>> root.name - 'abcde.zip' - >>> str(root.filename).replace(os.sep, posixpath.sep) - 'mem/abcde.zip' - >>> str(root.parent) - 'mem' - """ - - __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" - - def __init__(self, root, at=""): - """ - Construct a Path from a ZipFile or filename. - - Note: When the source is an existing ZipFile object, - its type (__class__) will be mutated to a - specialized type. If the caller wishes to retain the - original type, the caller should either create a - separate ZipFile object or pass a filename. - """ - self.root = FastLookup.make(root) - self.at = at - - def open(self, mode='r', *args, pwd=None, **kwargs): - """ - Open this entry as text or binary following the semantics - of ``pathlib.Path.open()`` by passing arguments through - to io.TextIOWrapper(). - """ - if self.is_dir(): - raise IsADirectoryError(self) - zip_mode = mode[0] - if not self.exists() and zip_mode == 'r': - raise FileNotFoundError(self) - stream = self.root.open(self.at, zip_mode, pwd=pwd) - if 'b' in mode: - if args or kwargs: - raise ValueError("encoding args invalid for binary operation") - return stream - return io.TextIOWrapper(stream, *args, **kwargs) - - @property - def name(self): - return pathlib.Path(self.at).name or self.filename.name - - @property - def suffix(self): - return pathlib.Path(self.at).suffix or self.filename.suffix - - @property - def suffixes(self): - return pathlib.Path(self.at).suffixes or self.filename.suffixes - - @property - def stem(self): - return pathlib.Path(self.at).stem or self.filename.stem - - @property - def filename(self): - return pathlib.Path(self.root.filename).joinpath(self.at) - - def read_text(self, *args, **kwargs): - with self.open('r', *args, **kwargs) as strm: - return strm.read() - - def read_bytes(self): - with self.open('rb') as strm: - return strm.read() - - def _is_child(self, path): - return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") - - def _next(self, at): - return self.__class__(self.root, at) - - def is_dir(self): - return not self.at or self.at.endswith("/") - - def is_file(self): - return self.exists() and not self.is_dir() - - def exists(self): - return self.at in self.root._name_set() - - def iterdir(self): - if not self.is_dir(): - raise ValueError("Can't listdir a file") - subs = map(self._next, self.root.namelist()) - return filter(self._is_child, subs) - - def __str__(self): - return posixpath.join(self.root.filename, self.at) - - def __repr__(self): - return self.__repr.format(self=self) - - def joinpath(self, *other): - next = posixpath.join(self.at, *map(_pathlib_compat, other)) - return self._next(self.root.resolve_dir(next)) - - __truediv__ = joinpath - - @property - def parent(self): - if not self.at: - return 
self.filename.parent - parent_at = posixpath.dirname(self.at.rstrip('/')) - if parent_at: - parent_at += '/' - return self._next(parent_at) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/config/_validate_pyproject/__init__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/config/_validate_pyproject/__init__.py deleted file mode 100644 index dbe6cb4ca471f146b431d2fbb558d47317a103f0..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/config/_validate_pyproject/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from functools import reduce -from typing import Any, Callable, Dict - -from . import formats -from .error_reporting import detailed_errors, ValidationError -from .extra_validations import EXTRA_VALIDATIONS -from .fastjsonschema_exceptions import JsonSchemaException, JsonSchemaValueException -from .fastjsonschema_validations import validate as _validate - -__all__ = [ - "validate", - "FORMAT_FUNCTIONS", - "EXTRA_VALIDATIONS", - "ValidationError", - "JsonSchemaException", - "JsonSchemaValueException", -] - - -FORMAT_FUNCTIONS: Dict[str, Callable[[str], bool]] = { - fn.__name__.replace("_", "-"): fn - for fn in formats.__dict__.values() - if callable(fn) and not fn.__name__.startswith("_") -} - - -def validate(data: Any) -> bool: - """Validate the given ``data`` object using JSON Schema - This function raises ``ValidationError`` if ``data`` is invalid. - """ - with detailed_errors(): - _validate(data, custom_formats=FORMAT_FUNCTIONS) - reduce(lambda acc, fn: fn(acc), EXTRA_VALIDATIONS, data) - return True diff --git a/spaces/plzdontcry/dakubettergpt/src/assets/icons/CalculatorIcon.tsx b/spaces/plzdontcry/dakubettergpt/src/assets/icons/CalculatorIcon.tsx deleted file mode 100644 index d627a5ea5c15341ae17e5e113b3d0870b869c91a..0000000000000000000000000000000000000000 --- a/spaces/plzdontcry/dakubettergpt/src/assets/icons/CalculatorIcon.tsx +++ /dev/null @@ -1,17 +0,0 @@ -import React from 'react'; - -const CalculatorIcon = (props: React.SVGProps) => { - return ( - - - - ); -}; - -export default CalculatorIcon; diff --git a/spaces/prairie-guy/Art_Mood/app.py b/spaces/prairie-guy/Art_Mood/app.py deleted file mode 100644 index e1ef6197124f6e44fe7e18e13db9bdd517435d7d..0000000000000000000000000000000000000000 --- a/spaces/prairie-guy/Art_Mood/app.py +++ /dev/null @@ -1,18 +0,0 @@ -import gradio as gr -from fastai.vision.all import * -import skimage - -learn = load_learner('export.pkl') - -def predict(img): - labels = learn.dls.vocab - im = PILImage.create(img) - pred,pred_idx,probs = learn.predict(im) - return {label: float(prob) for (label,prob) in zip(labels,probs)} - -gr.Interface(fn=predict, - inputs=gr.inputs.Image(shape=((400,400))), - outputs=gr.outputs.Label(num_top_classes=4), - title = "Art Mood", - examples = [f'_Image{i}.jpg' for i in range(1,12)], - description= "Does this Art make you think of Spring, Summer, Winter or Fall? 
Do you agree with the Prediction?").launch(share=True, enable_queue=True) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiosignal/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiosignal/__init__.py deleted file mode 100644 index 3d288e6ede67df2bb8e5660e30372e190eb23e65..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiosignal/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -from frozenlist import FrozenList - -__version__ = "1.3.1" - -__all__ = ("Signal",) - - -class Signal(FrozenList): - """Coroutine-based signal implementation. - - To connect a callback to a signal, use any list method. - - Signals are fired using the send() coroutine, which takes named - arguments. - """ - - __slots__ = ("_owner",) - - def __init__(self, owner): - super().__init__() - self._owner = owner - - def __repr__(self): - return "".format( - self._owner, self.frozen, list(self) - ) - - async def send(self, *args, **kwargs): - """ - Sends data to all registered receivers. - """ - if not self.frozen: - raise RuntimeError("Cannot send non-frozen signal.") - - for receiver in self: - await receiver(*args, **kwargs) # type: ignore diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/_f_e_a_t.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/_f_e_a_t.py deleted file mode 100644 index c9a48eff06cb14b1b2dc56c94ec7e02b80f11ca3..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/_f_e_a_t.py +++ /dev/null @@ -1,12 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table__f_e_a_t(BaseTTXConverter): - """The feature name table is an AAT (Apple Advanced Typography) table for - storing font features, settings, and their human-readable names. It should - not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS`` - tables. 
See `Feature Name Table `_ - in the TrueType Reference Manual for more information on the structure and - purpose of this table.""" - - pass diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Login-bbd6e215.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Login-bbd6e215.js deleted file mode 100644 index 81f628c647736501a65f199ac9ea36e71e238430..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Login-bbd6e215.js +++ /dev/null @@ -1,2 +0,0 @@ -import H from"./Index-09f26e4b.js";import{T as j}from"./Textbox-96e72fd5.js";import{a as J,B as z}from"./Button-89057c03.js";import{$ as K}from"./Index-37584f50.js";/* empty css */import M from"./Index-ab6a99fa.js";import"./BlockTitle-49fa584d.js";import"./Info-586340e7.js";import"./Copy-1b5c0932.js";import"./index-0526d562.js";import"./svelte/svelte.js";const{SvelteComponent:Q,add_flush_callback:A,append:L,attr:N,bind:E,binding_callbacks:G,component_subscribe:R,create_component:v,destroy_component:w,detach:c,element:T,init:U,insert:b,mount_component:k,safe_not_equal:V,set_data:D,space:B,text:I,toggle_class:C,transition_in:h,transition_out:x}=window.__gradio__svelte__internal;function F(f){let e,t;return{c(){e=T("p"),t=I(f[0]),N(e,"class","auth svelte-1ogxbi0")},m(n,l){b(n,e,l),L(e,t)},p(n,l){l&1&&D(t,n[0])},d(n){n&&c(e)}}}function O(f){let e,t=f[6]("login.enable_cookies")+"",n;return{c(){e=T("p"),n=I(t),N(e,"class","auth svelte-1ogxbi0")},m(l,o){b(l,e,o),L(e,n)},p(l,o){o&64&&t!==(t=l[6]("login.enable_cookies")+"")&&D(n,t)},d(l){l&&c(e)}}}function P(f){let e,t=f[6]("login.incorrect_credentials")+"",n;return{c(){e=T("p"),n=I(t),N(e,"class","creds svelte-1ogxbi0")},m(l,o){b(l,e,o),L(e,n)},p(l,o){o&64&&t!==(t=l[6]("login.incorrect_credentials")+"")&&D(n,t)},d(l){l&&c(e)}}}function W(f){let e,t,n;function l(s){f[9](s)}let o={label:"username",lines:1,show_label:!0,max_lines:1};return f[3]!==void 0&&(o.value=f[3]),e=new j({props:o}),G.push(()=>E(e,"value",l)),e.$on("submit",f[7]),{c(){v(e.$$.fragment)},m(s,_){k(e,s,_),n=!0},p(s,_){const r={};!t&&_&8&&(t=!0,r.value=s[3],A(()=>t=!1)),e.$set(r)},i(s){n||(h(e.$$.fragment,s),n=!0)},o(s){x(e.$$.fragment,s),n=!1},d(s){w(e,s)}}}function X(f){let e,t,n;function l(s){f[10](s)}let o={label:"password",lines:1,show_label:!0,max_lines:1,type:"password"};return f[4]!==void 0&&(o.value=f[4]),e=new j({props:o}),G.push(()=>E(e,"value",l)),e.$on("submit",f[7]),{c(){v(e.$$.fragment)},m(s,_){k(e,s,_),n=!0},p(s,_){const r={};!t&&_&16&&(t=!0,r.value=s[4],A(()=>t=!1)),e.$set(r)},i(s){n||(h(e.$$.fragment,s),n=!0)},o(s){x(e.$$.fragment,s),n=!1},d(s){w(e,s)}}}function Y(f){let e,t,n,l;return e=new z({props:{$$slots:{default:[W]},$$scope:{ctx:f}}}),n=new z({props:{$$slots:{default:[X]},$$scope:{ctx:f}}}),{c(){v(e.$$.fragment),t=B(),v(n.$$.fragment)},m(o,s){k(e,o,s),b(o,t,s),k(n,o,s),l=!0},p(o,s){const _={};s&2056&&(_.$$scope={dirty:s,ctx:o}),e.$set(_);const r={};s&2064&&(r.$$scope={dirty:s,ctx:o}),n.$set(r)},i(o){l||(h(e.$$.fragment,o),h(n.$$.fragment,o),l=!0)},o(o){x(e.$$.fragment,o),x(n.$$.fragment,o),l=!1},d(o){o&&c(t),w(e,o),w(n,o)}}}function Z(f){let e=f[6]("login.login")+"",t;return{c(){t=I(e)},m(n,l){b(n,t,l)},p(n,l){l&64&&e!==(e=n[6]("login.login")+"")&&D(t,e)},d(n){n&&c(t)}}}function y(f){let e,t=f[6]("login.login")+"",n,l,o,s,_,r,d,$,g,m=f[0]&&F(f),p=f[2]&&O(f),a=f[5]&&P(f);return r=new 
H({props:{$$slots:{default:[Y]},$$scope:{ctx:f}}}),$=new J({props:{size:"lg",variant:"primary",$$slots:{default:[Z]},$$scope:{ctx:f}}}),$.$on("click",f[7]),{c(){e=T("h2"),n=I(t),l=B(),m&&m.c(),o=B(),p&&p.c(),s=B(),a&&a.c(),_=B(),v(r.$$.fragment),d=B(),v($.$$.fragment),N(e,"class","svelte-1ogxbi0")},m(i,u){b(i,e,u),L(e,n),b(i,l,u),m&&m.m(i,u),b(i,o,u),p&&p.m(i,u),b(i,s,u),a&&a.m(i,u),b(i,_,u),k(r,i,u),b(i,d,u),k($,i,u),g=!0},p(i,u){(!g||u&64)&&t!==(t=i[6]("login.login")+"")&&D(n,t),i[0]?m?m.p(i,u):(m=F(i),m.c(),m.m(o.parentNode,o)):m&&(m.d(1),m=null),i[2]?p?p.p(i,u):(p=O(i),p.c(),p.m(s.parentNode,s)):p&&(p.d(1),p=null),i[5]?a?a.p(i,u):(a=P(i),a.c(),a.m(_.parentNode,_)):a&&(a.d(1),a=null);const S={};u&2072&&(S.$$scope={dirty:u,ctx:i}),r.$set(S);const q={};u&2112&&(q.$$scope={dirty:u,ctx:i}),$.$set(q)},i(i){g||(h(r.$$.fragment,i),h($.$$.fragment,i),g=!0)},o(i){x(r.$$.fragment,i),x($.$$.fragment,i),g=!1},d(i){i&&(c(e),c(l),c(o),c(s),c(_),c(d)),m&&m.d(i),p&&p.d(i),a&&a.d(i),w(r,i),w($,i)}}}function ee(f){let e,t,n;return t=new M({props:{variant:"panel",min_width:480,$$slots:{default:[y]},$$scope:{ctx:f}}}),{c(){e=T("div"),v(t.$$.fragment),N(e,"class","wrap svelte-1ogxbi0"),C(e,"min-h-screen",f[1])},m(l,o){b(l,e,o),k(t,e,null),n=!0},p(l,[o]){const s={};o&2173&&(s.$$scope={dirty:o,ctx:l}),t.$set(s),(!n||o&2)&&C(e,"min-h-screen",l[1])},i(l){n||(h(t.$$.fragment,l),n=!0)},o(l){x(t.$$.fragment,l),n=!1},d(l){l&&c(e),w(t)}}}function te(f,e,t){let n;R(f,K,a=>t(6,n=a));let{root:l}=e,{auth_message:o}=e,{app_mode:s}=e,{space_id:_}=e,r="",d="",$=!1;const g=async()=>{const a=new FormData;a.append("username",r),a.append("password",d);let i=await fetch(l+"/login",{method:"POST",body:a});i.status===400?(t(5,$=!0),t(3,r=""),t(4,d="")):i.status==200&&location.reload()};function m(a){r=a,t(3,r)}function p(a){d=a,t(4,d)}return f.$$set=a=>{"root"in a&&t(8,l=a.root),"auth_message"in a&&t(0,o=a.auth_message),"app_mode"in a&&t(1,s=a.app_mode),"space_id"in a&&t(2,_=a.space_id)},[o,s,_,r,d,$,n,g,l,m,p]}class pe extends Q{constructor(e){super(),U(this,e,te,ee,V,{root:8,auth_message:0,app_mode:1,space_id:2})}}export{pe as default}; -//# sourceMappingURL=Login-bbd6e215.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-67fb205c.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-67fb205c.css deleted file mode 100644 index 3bd432bc6e09aad960764f818df39d0c7d81d9cb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-67fb205c.css +++ /dev/null @@ -1 +0,0 @@ -div.svelte-aww9vd .math.inline{fill:var(--body-text-color);display:inline-block;vertical-align:middle;padding:var(--size-1-5) -var(--size-1);color:var(--body-text-color)}div.svelte-aww9vd .math.inline svg{display:inline;margin-bottom:.22em}div.svelte-aww9vd{max-width:100%;overflow-x:auto}.min.svelte-aww9vd{min-height:var(--size-24)}.hide.svelte-aww9vd{display:none}div.svelte-1ed2p3z{transition:.15s}.pending.svelte-1ed2p3z{opacity:.2} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.py deleted file mode 100644 index 069ac2a69d2247a26af8d807a695d4e659aed1fe..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.py +++ /dev/null @@ -1,150 +0,0 @@ -""" -Tests the usecols functionality during parsing -for all of the parsers defined in parsers.py -""" -from io import StringIO - -import pytest - -from pandas import ( - DataFrame, - Index, - Timestamp, -) -import pandas._testing as tm - -# TODO(1.4): Change these to xfails whenever parse_dates support(which was -# intentionally disable to keep small PR sizes) is added back -pytestmark = pytest.mark.usefixtures("pyarrow_skip") - - -@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]]) -def test_usecols_with_parse_dates(all_parsers, usecols): - # see gh-9755 - data = """a,b,c,d,e -0,1,2014-01-01,09:00,4 -0,1,2014-01-02,10:00,4""" - parser = all_parsers - parse_dates = [[1, 2]] - - cols = { - "a": [0, 0], - "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], - } - expected = DataFrame(cols, columns=["c_d", "a"]) - result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates) - tm.assert_frame_equal(result, expected) - - -def test_usecols_with_parse_dates2(all_parsers): - # see gh-13604 - parser = all_parsers - data = """2008-02-07 09:40,1032.43 -2008-02-07 09:50,1042.54 -2008-02-07 10:00,1051.65""" - - names = ["date", "values"] - usecols = names[:] - parse_dates = [0] - - index = Index( - [ - Timestamp("2008-02-07 09:40"), - Timestamp("2008-02-07 09:50"), - Timestamp("2008-02-07 10:00"), - ], - name="date", - ) - cols = {"values": [1032.43, 1042.54, 1051.65]} - expected = DataFrame(cols, index=index) - - result = parser.read_csv( - StringIO(data), - parse_dates=parse_dates, - index_col=0, - usecols=usecols, - header=None, - names=names, - ) - tm.assert_frame_equal(result, expected) - - -def test_usecols_with_parse_dates3(all_parsers): - # see gh-14792 - parser = all_parsers - data = """a,b,c,d,e,f,g,h,i,j -2016/09/21,1,1,2,3,4,5,6,7,8""" - - usecols = list("abcdefghij") - parse_dates = [0] - - cols = { - "a": Timestamp("2016-09-21").as_unit("ns"), - "b": [1], - "c": [1], - "d": [2], - "e": [3], - "f": [4], - "g": [5], - "h": [6], - "i": [7], - "j": [8], - } - expected = DataFrame(cols, columns=usecols) - - result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates) - tm.assert_frame_equal(result, expected) - - -def test_usecols_with_parse_dates4(all_parsers): - data = "a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8" - usecols = list("abcdefghij") - parse_dates = [[0, 1]] - parser = all_parsers - - cols = { - "a_b": "2016/09/21 1", - "c": [1], - "d": [2], - "e": [3], - "f": [4], - "g": [5], - "h": [6], - "i": [7], - "j": [8], - } - expected = DataFrame(cols, columns=["a_b"] + list("cdefghij")) - - result = parser.read_csv( - StringIO(data), - usecols=usecols, - parse_dates=parse_dates, - ) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]]) -@pytest.mark.parametrize( - "names", - [ - list("abcde"), # Names span all columns in original data. - list("acd"), # Names span only the selected columns. 
- ], -) -def test_usecols_with_parse_dates_and_names(all_parsers, usecols, names): - # see gh-9755 - s = """0,1,2014-01-01,09:00,4 -0,1,2014-01-02,10:00,4""" - parse_dates = [[1, 2]] - parser = all_parsers - - cols = { - "a": [0, 0], - "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], - } - expected = DataFrame(cols, columns=["c_d", "a"]) - - result = parser.read_csv( - StringIO(s), names=names, parse_dates=parse_dates, usecols=usecols - ) - tm.assert_frame_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/cachecontrol/controller.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/cachecontrol/controller.py deleted file mode 100644 index d7e73802818c336c4316ee1fc596efa54763bb4b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/cachecontrol/controller.py +++ /dev/null @@ -1,415 +0,0 @@ -# SPDX-FileCopyrightText: 2015 Eric Larson -# -# SPDX-License-Identifier: Apache-2.0 - -""" -The httplib2 algorithms ported for use with requests. -""" -import logging -import re -import calendar -import time -from email.utils import parsedate_tz - -from pip._vendor.requests.structures import CaseInsensitiveDict - -from .cache import DictCache -from .serialize import Serializer - - -logger = logging.getLogger(__name__) - -URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") - -PERMANENT_REDIRECT_STATUSES = (301, 308) - - -def parse_uri(uri): - """Parses a URI using the regex given in Appendix B of RFC 3986. - - (scheme, authority, path, query, fragment) = parse_uri(uri) - """ - groups = URI.match(uri).groups() - return (groups[1], groups[3], groups[4], groups[6], groups[8]) - - -class CacheController(object): - """An interface to see if request should cached or not. - """ - - def __init__( - self, cache=None, cache_etags=True, serializer=None, status_codes=None - ): - self.cache = DictCache() if cache is None else cache - self.cache_etags = cache_etags - self.serializer = serializer or Serializer() - self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308) - - @classmethod - def _urlnorm(cls, uri): - """Normalize the URL to create a safe key for the cache""" - (scheme, authority, path, query, fragment) = parse_uri(uri) - if not scheme or not authority: - raise Exception("Only absolute URIs are allowed. uri = %s" % uri) - - scheme = scheme.lower() - authority = authority.lower() - - if not path: - path = "/" - - # Could do syntax based normalization of the URI before - # computing the digest. See Section 6.2.2 of Std 66. 
- request_uri = query and "?".join([path, query]) or path - defrag_uri = scheme + "://" + authority + request_uri - - return defrag_uri - - @classmethod - def cache_url(cls, uri): - return cls._urlnorm(uri) - - def parse_cache_control(self, headers): - known_directives = { - # https://tools.ietf.org/html/rfc7234#section-5.2 - "max-age": (int, True), - "max-stale": (int, False), - "min-fresh": (int, True), - "no-cache": (None, False), - "no-store": (None, False), - "no-transform": (None, False), - "only-if-cached": (None, False), - "must-revalidate": (None, False), - "public": (None, False), - "private": (None, False), - "proxy-revalidate": (None, False), - "s-maxage": (int, True), - } - - cc_headers = headers.get("cache-control", headers.get("Cache-Control", "")) - - retval = {} - - for cc_directive in cc_headers.split(","): - if not cc_directive.strip(): - continue - - parts = cc_directive.split("=", 1) - directive = parts[0].strip() - - try: - typ, required = known_directives[directive] - except KeyError: - logger.debug("Ignoring unknown cache-control directive: %s", directive) - continue - - if not typ or not required: - retval[directive] = None - if typ: - try: - retval[directive] = typ(parts[1].strip()) - except IndexError: - if required: - logger.debug( - "Missing value for cache-control " "directive: %s", - directive, - ) - except ValueError: - logger.debug( - "Invalid value for cache-control directive " "%s, must be %s", - directive, - typ.__name__, - ) - - return retval - - def cached_request(self, request): - """ - Return a cached response if it exists in the cache, otherwise - return False. - """ - cache_url = self.cache_url(request.url) - logger.debug('Looking up "%s" in the cache', cache_url) - cc = self.parse_cache_control(request.headers) - - # Bail out if the request insists on fresh data - if "no-cache" in cc: - logger.debug('Request header has "no-cache", cache bypassed') - return False - - if "max-age" in cc and cc["max-age"] == 0: - logger.debug('Request header has "max_age" as 0, cache bypassed') - return False - - # Request allows serving from the cache, let's see if we find something - cache_data = self.cache.get(cache_url) - if cache_data is None: - logger.debug("No cache entry available") - return False - - # Check whether it can be deserialized - resp = self.serializer.loads(request, cache_data) - if not resp: - logger.warning("Cache entry deserialization failed, entry ignored") - return False - - # If we have a cached permanent redirect, return it immediately. We - # don't need to test our response for other headers b/c it is - # intrinsically "cacheable" as it is Permanent. - # - # See: - # https://tools.ietf.org/html/rfc7231#section-6.4.2 - # - # Client can try to refresh the value by repeating the request - # with cache busting headers as usual (ie no-cache). - if int(resp.status) in PERMANENT_REDIRECT_STATUSES: - msg = ( - "Returning cached permanent redirect response " - "(ignoring date and etag information)" - ) - logger.debug(msg) - return resp - - headers = CaseInsensitiveDict(resp.headers) - if not headers or "date" not in headers: - if "etag" not in headers: - # Without date or etag, the cached response can never be used - # and should be deleted. 
- logger.debug("Purging cached response: no date or etag") - self.cache.delete(cache_url) - logger.debug("Ignoring cached response: no date") - return False - - now = time.time() - date = calendar.timegm(parsedate_tz(headers["date"])) - current_age = max(0, now - date) - logger.debug("Current age based on date: %i", current_age) - - # TODO: There is an assumption that the result will be a - # urllib3 response object. This may not be best since we - # could probably avoid instantiating or constructing the - # response until we know we need it. - resp_cc = self.parse_cache_control(headers) - - # determine freshness - freshness_lifetime = 0 - - # Check the max-age pragma in the cache control header - if "max-age" in resp_cc: - freshness_lifetime = resp_cc["max-age"] - logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime) - - # If there isn't a max-age, check for an expires header - elif "expires" in headers: - expires = parsedate_tz(headers["expires"]) - if expires is not None: - expire_time = calendar.timegm(expires) - date - freshness_lifetime = max(0, expire_time) - logger.debug("Freshness lifetime from expires: %i", freshness_lifetime) - - # Determine if we are setting freshness limit in the - # request. Note, this overrides what was in the response. - if "max-age" in cc: - freshness_lifetime = cc["max-age"] - logger.debug( - "Freshness lifetime from request max-age: %i", freshness_lifetime - ) - - if "min-fresh" in cc: - min_fresh = cc["min-fresh"] - # adjust our current age by our min fresh - current_age += min_fresh - logger.debug("Adjusted current age from min-fresh: %i", current_age) - - # Return entry if it is fresh enough - if freshness_lifetime > current_age: - logger.debug('The response is "fresh", returning cached response') - logger.debug("%i > %i", freshness_lifetime, current_age) - return resp - - # we're not fresh. If we don't have an Etag, clear it out - if "etag" not in headers: - logger.debug('The cached response is "stale" with no etag, purging') - self.cache.delete(cache_url) - - # return the original handler - return False - - def conditional_headers(self, request): - cache_url = self.cache_url(request.url) - resp = self.serializer.loads(request, self.cache.get(cache_url)) - new_headers = {} - - if resp: - headers = CaseInsensitiveDict(resp.headers) - - if "etag" in headers: - new_headers["If-None-Match"] = headers["ETag"] - - if "last-modified" in headers: - new_headers["If-Modified-Since"] = headers["Last-Modified"] - - return new_headers - - def cache_response(self, request, response, body=None, status_codes=None): - """ - Algorithm for caching requests. - - This assumes a requests Response object. - """ - # From httplib2: Don't cache 206's since we aren't going to - # handle byte range requests - cacheable_status_codes = status_codes or self.cacheable_status_codes - if response.status not in cacheable_status_codes: - logger.debug( - "Status code %s not in %s", response.status, cacheable_status_codes - ) - return - - response_headers = CaseInsensitiveDict(response.headers) - - if "date" in response_headers: - date = calendar.timegm(parsedate_tz(response_headers["date"])) - else: - date = 0 - - # If we've been given a body, our response has a Content-Length, that - # Content-Length is valid then we can check to see if the body we've - # been given matches the expected size, and if it doesn't we'll just - # skip trying to cache it. 
- if ( - body is not None - and "content-length" in response_headers - and response_headers["content-length"].isdigit() - and int(response_headers["content-length"]) != len(body) - ): - return - - cc_req = self.parse_cache_control(request.headers) - cc = self.parse_cache_control(response_headers) - - cache_url = self.cache_url(request.url) - logger.debug('Updating cache with response from "%s"', cache_url) - - # Delete it from the cache if we happen to have it stored there - no_store = False - if "no-store" in cc: - no_store = True - logger.debug('Response header has "no-store"') - if "no-store" in cc_req: - no_store = True - logger.debug('Request header has "no-store"') - if no_store and self.cache.get(cache_url): - logger.debug('Purging existing cache entry to honor "no-store"') - self.cache.delete(cache_url) - if no_store: - return - - # https://tools.ietf.org/html/rfc7234#section-4.1: - # A Vary header field-value of "*" always fails to match. - # Storing such a response leads to a deserialization warning - # during cache lookup and is not allowed to ever be served, - # so storing it can be avoided. - if "*" in response_headers.get("vary", ""): - logger.debug('Response header has "Vary: *"') - return - - # If we've been given an etag, then keep the response - if self.cache_etags and "etag" in response_headers: - expires_time = 0 - if response_headers.get("expires"): - expires = parsedate_tz(response_headers["expires"]) - if expires is not None: - expires_time = calendar.timegm(expires) - date - - expires_time = max(expires_time, 14 * 86400) - - logger.debug("etag object cached for {0} seconds".format(expires_time)) - logger.debug("Caching due to etag") - self.cache.set( - cache_url, - self.serializer.dumps(request, response, body), - expires=expires_time, - ) - - # Add to the cache any permanent redirects. We do this before looking - # that the Date headers. - elif int(response.status) in PERMANENT_REDIRECT_STATUSES: - logger.debug("Caching permanent redirect") - self.cache.set(cache_url, self.serializer.dumps(request, response, b"")) - - # Add to the cache if the response headers demand it. If there - # is no date header then we can't do anything about expiring - # the cache. - elif "date" in response_headers: - date = calendar.timegm(parsedate_tz(response_headers["date"])) - # cache when there is a max-age > 0 - if "max-age" in cc and cc["max-age"] > 0: - logger.debug("Caching b/c date exists and max-age > 0") - expires_time = cc["max-age"] - self.cache.set( - cache_url, - self.serializer.dumps(request, response, body), - expires=expires_time, - ) - - # If the request can expire, it means we should cache it - # in the meantime. - elif "expires" in response_headers: - if response_headers["expires"]: - expires = parsedate_tz(response_headers["expires"]) - if expires is not None: - expires_time = calendar.timegm(expires) - date - else: - expires_time = None - - logger.debug( - "Caching b/c of expires header. expires in {0} seconds".format( - expires_time - ) - ) - self.cache.set( - cache_url, - self.serializer.dumps(request, response, body=body), - expires=expires_time, - ) - - def update_cached_response(self, request, response): - """On a 304 we will get a new set of headers that we want to - update our cached value with, assuming we have one. - - This should only ever be called when we've sent an ETag and - gotten a 304 as the response. 
- """ - cache_url = self.cache_url(request.url) - - cached_response = self.serializer.loads(request, self.cache.get(cache_url)) - - if not cached_response: - # we didn't have a cached response - return response - - # Lets update our headers with the headers from the new request: - # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 - # - # The server isn't supposed to send headers that would make - # the cached body invalid. But... just in case, we'll be sure - # to strip out ones we know that might be problmatic due to - # typical assumptions. - excluded_headers = ["content-length"] - - cached_response.headers.update( - dict( - (k, v) - for k, v in response.headers.items() - if k.lower() not in excluded_headers - ) - ) - - # we want a 200 b/c we have content via the cache - cached_response.status = 200 - - # update our cache - self.cache.set(cache_url, self.serializer.dumps(request, cached_response)) - - return cached_response diff --git a/spaces/pycui/RealChar/realtime_ai_character/database/chroma.py b/spaces/pycui/RealChar/realtime_ai_character/database/chroma.py deleted file mode 100644 index c7aaa68ea5f7a3e371372c3a3987e3cbd9477731..0000000000000000000000000000000000000000 --- a/spaces/pycui/RealChar/realtime_ai_character/database/chroma.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -from dotenv import load_dotenv -from langchain.vectorstores import Chroma -from langchain.embeddings import OpenAIEmbeddings -from realtime_ai_character.logger import get_logger - -load_dotenv() -logger = get_logger(__name__) - -embedding = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY")) -if os.getenv('OPENAI_API_TYPE') == 'azure': - embedding = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY"), deployment=os.getenv( - "OPENAI_API_EMBEDDING_DEPLOYMENT_NAME", "text-embedding-ada-002"), chunk_size=1) - - -def get_chroma(): - chroma = Chroma( - collection_name='llm', - embedding_function=embedding, - persist_directory='/home/user/app/chroma.db' - ) - return chroma diff --git a/spaces/pyodide-demo/self-hosted/CLAPACK.js b/spaces/pyodide-demo/self-hosted/CLAPACK.js deleted file mode 100644 index 24bdc52ea95f740521cd9e2f9ec725fd9aae833f..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/CLAPACK.js +++ /dev/null @@ -1 +0,0 @@ -var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="CLAPACK.data";var REMOTE_PACKAGE_BASE="CLAPACK.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function 
fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... ("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var 
compressedData={data:null,cachedOffset:2627739,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1338,2525,4077,5682,7218,8806,10384,11959,13522,15091,16678,18452,19976,21218,22662,24059,25388,26593,27842,29212,30568,31964,33456,34473,35609,37056,38188,39607,41018,42363,43728,44833,46062,47129,48158,49297,50604,51791,53153,54552,55973,56974,58274,59280,60391,61700,62919,64221,65209,66317,67527,68376,69240,70390,71577,72957,74265,75562,76690,77950,79261,80549,81975,83284,84680,85839,87149,88487,89804,91167,92393,93637,94588,95522,96753,97867,99155,100394,101610,102892,104338,105765,106805,108095,109099,110330,111399,112808,114053,115295,116181,117170,118308,119147,120186,121428,122871,124271,125384,126655,128046,129484,130835,132330,133863,134902,135920,137167,138398,139763,140950,142189,143262,144351,145682,146911,148172,149380,150557,151836,153139,154564,155808,157008,158541,159690,160562,161688,162763,163969,165341,166438,167898,169114,170426,171504,172537,173743,175311,176569,177842,179096,180464,181665,182541,183962,185193,186503,187625,188784,190158,191547,192885,194325,195763,196841,197923,199241,200428,201770,202837,203918,205020,205922,207258,208403,209409,210676,211810,212960,214213,215432,216657,218072,219291,220503,221562,222941,223888,225220,226379,227624,228740,230060,231178,232538,233535,234454,235619,237123,238370,239597,240767,242115,243276,244073,245703,247096,248508,249929,251398,252845,254282,255684,257270,258581,260081,261545,263016,264543,265787,267288,268819,270298,271540,272793,274165,275639,276937,278334,279440,281067,282702,284015,285517,287015,288499,290136,291514,293089,294698,296202,297594,299109,300785,302082,303439,304751,305840,307232,308459,309760,311039,312298,312685,313475,314197,315498,316804,317934,319040,320231,321527,322807,323908,325011,326266,327541,328816,330288,331839,333211,334536,335913,337334,338703,340254,341659,343222,344473,345935,347326,348612,350167,351682,353128,354656,356013,357345,358799,359855,361097,362643,363834,365304,366380,367813,369203,370746,372124,373540,375103,376525,377673,379013,380319,381662,383094,384305,385853,386985,388458,389611,390549,391790,393178,394588,396031,397377,398974,400516,402094,403529,404810,406122,407390,408803,410197,411565,412800,413931,415332,416664,417959,419325,420590,421889,423328,424553,426016,427581,429171,430691,432129,433546,435015,436482,438064,439703,441223,442475,443690,445012,446267,447577,448796,450135,451596,452601,453784,454804,456332,457559,458563,459771,460822,462435,463853,465541,467013,468355,469702,470750,472169,473646,475083,476509,477783,479269,480651,481946,483347,484706,485755,487170,488606,490172,491430,492867,494063,495592,497075,498626,500234,501545,502987,504519,505942,507307,508665,510070,511470,512906,514188,515621,517171,518709,520129,521632,522833,524353,525782,527295,528475,529939,531488,532883,534330,535892,537401,538912,540463,542073,543765,545327,546848,548192,549728,551143,552709,554075,555584,557142,558543,559948,561672,563141,564642,566067,567672,569322,570807,572274,573698,575214,576710,578259,579790,581204,582695,584283,585952,587591,589181,590727,592344,593753,595031,596510,598046,599302,600789,602273,603904,605482,606838,608073,609650,610651,612050,613597,615097,616560,617998,619445,620698,621921,622949,624185,625547,626679,628156,629664,631060,632573,634044,635374,636691,638049,639206,640673,642118,643597,644823,646221,647672,649179,650432,651758,652889,654049,655439,656808,657920,659437,661010,662377,663561,664788,666084,667237,668271,669706,671309,67287
9,674464,675619,676790,677926,678817,680015,680926,682075,683293,684497,685854,686671,687917,688861,689813,690891,692017,693314,694612,695714,696832,698144,699388,700601,701834,703013,704542,706084,707494,708657,709993,711326,712697,713879,715253,716565,718059,719121,720513,721724,722707,724055,725075,726402,727856,729179,730459,731590,732933,734442,735572,737083,738471,739817,741420,742801,744315,745687,747113,748396,749824,751249,752647,754048,755636,756942,758489,759874,761302,762876,764274,765774,767106,768594,769998,771008,772433,773591,775056,776443,777794,779225,780763,782344,783482,785028,786386,787885,789358,790844,792334,793896,795404,796745,798275,799916,801104,802467,803714,804785,806150,807378,808648,809913,811329,811812,812722,813188,814635,815919,816986,818164,819299,820591,821895,822969,824152,825252,826499,827763,829160,830719,832259,833517,834888,836113,837614,838877,840373,841646,843148,844159,845640,846974,848308,849844,851348,852766,854285,855654,856936,858337,859421,860662,862176,863274,864745,865791,867208,868573,870113,871510,872805,874331,875776,876930,878186,879567,880882,882206,883449,884858,885971,887220,888568,889783,890405,891665,893006,894454,895812,897084,898633,900155,901726,903153,904400,905659,906770,908138,909512,910849,912166,913264,914497,915685,916903,918297,919494,920657,921920,923362,924623,926082,927621,929199,930665,932037,933472,934921,936210,937677,939193,940784,942081,943283,944483,945874,947125,948345,949680,950938,952330,953444,954453,955567,956969,957978,959111,960297,961426,963082,964488,966126,967523,968873,970097,971074,972468,973899,975319,976804,977966,979311,980809,981852,983152,984386,985659,986823,988278,989716,991079,992318,993796,994859,996328,997818,999350,1000984,1002193,1003526,1004983,1006355,1007764,1009205,1010611,1012038,1013462,1014659,1016049,1017589,1019141,1020463,1021865,1023131,1024650,1026130,1027617,1028849,1030378,1031778,1033204,1034432,1035954,1037484,1038973,1040392,1042029,1043666,1045362,1046774,1048317,1049764,1051206,1052720,1054252,1055584,1057095,1058489,1059981,1061616,1063341,1064764,1066167,1067648,1069299,1070824,1072213,1073516,1075073,1076535,1078022,1079396,1080877,1082420,1083895,1085499,1087138,1088779,1090392,1091948,1093533,1094854,1096117,1097607,1099133,1100407,1101866,1103320,1104943,1106487,1107784,1109019,1110578,1111576,1112859,1114349,1115743,1117089,1118518,1119963,1121347,1122661,1123939,1124969,1126194,1127474,1128804,1130121,1131634,1132946,1134447,1136010,1137262,1138490,1139879,1141086,1142609,1144028,1145500,1146694,1147874,1149254,1150807,1152060,1153270,1154522,1155560,1156795,1157883,1158989,1160205,1161687,1163214,1164627,1165685,1166894,1168165,1169335,1170345,1171764,1173346,1174957,1176506,1177859,1179281,1180275,1181365,1182443,1183361,1184513,1185414,1186477,1187624,1188824,1190143,1190957,1192188,1193114,1194076,1195157,1196275,1197558,1198886,1199941,1201052,1202417,1203630,1204854,1206082,1207269,1208805,1210205,1211673,1212707,1213919,1215326,1216621,1217685,1218980,1220317,1221721,1223280,1224502,1225608,1226777,1227716,1228955,1229928,1231126,1232671,1233687,1235082,1236147,1237565,1238908,1240081,1241586,1242898,1244203,1245775,1247069,1248468,1249859,1251006,1252515,1253995,1255335,1256902,1258377,1259886,1261269,1262646,1263991,1265600,1266975,1268523,1269953,1271519,1272890,1274394,1275761,1277311,1278779,1280109,1281508,1283094,1284486,1285787,1287451,1289045,1290573,1291821,1293334,1294790,1296339,1297758,1299410,1300947,1302492,1303864,1305397,1306971,1308520,13
09871,1311296,1312374,1313235,1314764,1316025,1317316,1318543,1319650,1321070,1321562,1322480,1322933,1324340,1325564,1326690,1327792,1328960,1330204,1331427,1332465,1333547,1334691,1335922,1337380,1338834,1340403,1341715,1343076,1344455,1345962,1347352,1348800,1350350,1351640,1353078,1354314,1355890,1357437,1358925,1360520,1361817,1363235,1364201,1365713,1367135,1368571,1369589,1370359,1371963,1373650,1375363,1376774,1378338,1379737,1381274,1382704,1384279,1385852,1387226,1388839,1390426,1391837,1393348,1395032,1396556,1397920,1399403,1400995,1402597,1404197,1405800,1407183,1408507,1409907,1411393,1412676,1414189,1415585,1417174,1418670,1420036,1421604,1422796,1424154,1425513,1427042,1428419,1429803,1430936,1432561,1434173,1435853,1437409,1438929,1440076,1441549,1443059,1444601,1446227,1447451,1448997,1450310,1451885,1453313,1454390,1455566,1456864,1458050,1459419,1460976,1462613,1464074,1465627,1467163,1468673,1469885,1470843,1471936,1473117,1474571,1475914,1477394,1478918,1480325,1481691,1483141,1484558,1485990,1487504,1488795,1490195,1491828,1493272,1494629,1496055,1497355,1498749,1500140,1501580,1502979,1504480,1505915,1507058,1508432,1509593,1510903,1512177,1513514,1514768,1516074,1517444,1518868,1520515,1522049,1523585,1525027,1526394,1527782,1529158,1530704,1532359,1533884,1535291,1536530,1537677,1538865,1540155,1541529,1542947,1544485,1545693,1546863,1547994,1549505,1550973,1552218,1553220,1554370,1555494,1556652,1558096,1559158,1560281,1561343,1562487,1563746,1565365,1566881,1568523,1569875,1571544,1573092,1574398,1575945,1577006,1578150,1578950,1580020,1581170,1582658,1584052,1585693,1586818,1588126,1589469,1590994,1592614,1593864,1595096,1596290,1597658,1599013,1600277,1601541,1602771,1604202,1605597,1607109,1608207,1609622,1611164,1612727,1614039,1615512,1617040,1618254,1619786,1621210,1622670,1624142,1625704,1626892,1628197,1629569,1630963,1632435,1634008,1635460,1636605,1638147,1639484,1640934,1642229,1643595,1644896,1646336,1647858,1649451,1650762,1652126,1653421,1655049,1656680,1658202,1659785,1661087,1662631,1663878,1665371,1666432,1667930,1669474,1671114,1672482,1673772,1675e3,1676557,1677534,1678687,1680181,1681665,1683032,1684383,1686012,1687321,1688877,1690282,1691750,1693217,1694838,1696303,1697692,1699001,1700508,1701466,1702784,1704387,1705872,1707361,1708699,1709652,1711184,1712850,1714241,1715450,1716863,1718452,1719950,1721479,1722833,1724393,1725867,1727455,1729082,1730482,1731911,1733380,1734836,1736181,1737514,1739013,1740464,1741919,1743246,1744785,1746359,1747577,1748616,1749916,1751192,1752169,1753417,1754494,1755708,1756594,1757513,1758784,1759835,1760983,1762224,1763102,1764065,1765047,1765934,1767177,1768375,1769820,1771138,1772267,1773523,1774995,1776520,1777811,1779044,1780435,1781376,1782771,1784083,1785422,1786540,1788067,1789541,1790882,1792422,1793884,1795307,1796624,1797900,1799233,1800801,1802233,1803686,1804923,1806279,1807651,1809189,1810635,1812141,1813620,1814931,1816427,1817808,1819075,1820497,1821805,1823391,1825031,1826298,1827815,1829270,1830731,1832313,1833674,1835211,1836789,1838294,1839626,1841104,1842740,1844170,1845534,1846935,1847871,1848755,1850224,1851461,1852746,1853980,1855094,1856453,1856924,1857853,1858359,1859739,1860933,1862060,1863152,1864309,1865563,1866788,1867812,1868893,1870006,1871287,1872703,1874182,1875774,1877039,1878212,1879499,1880888,1882233,1883709,1885155,1886568,1888022,1889435,1890973,1892499,1893986,1895584,1896960,1898485,1899228,1900496,1902028,1903255,1904093,1904941,1906122,1907720,1909424,1910978,191258
9,1914042,1915553,1917113,1918498,1920092,1921642,1922998,1924566,1926106,1927525,1929009,1930723,1932212,1933639,1935026,1936570,1938148,1939741,1941306,1942650,1943928,1945268,1946726,1948001,1949348,1950699,1952164,1953624,1955268,1956845,1958038,1959386,1960464,1961922,1963264,1964539,1965879,1967030,1968586,1970190,1971842,1973363,1974829,1976001,1977434,1978868,1980336,1981927,1983151,1984602,1985725,1987217,1988784,1989892,1990880,1992204,1993525,1994671,1996209,1997759,1999223,2000740,2001926,2003443,2004629,2005722,2006881,2007936,2009024,2010505,2011808,2013258,2014767,2016137,2017468,2018915,2020155,2021610,2022998,2024354,2025809,2027348,2028677,2030107,2031615,2033004,2034234,2035617,2036995,2038382,2039783,2041189,2042430,2043674,2044909,2046138,2047346,2048705,2049850,2051173,2052525,2053941,2055473,2056984,2058507,2060018,2061438,2062864,2064222,2065675,2067162,2068860,2070382,2071443,2072442,2073864,2075030,2076486,2077784,2079114,2080470,2081699,2082853,2084003,2085527,2086855,2088285,2089329,2090397,2091502,2092616,2093825,2095236,2096336,2097368,2098558,2099728,2101001,2102412,2103869,2105322,2106773,2108281,2109866,2111361,2112766,2113970,2115166,2116108,2116849,2117930,2119224,2120668,2122199,2123701,2124755,2126061,2127306,2128740,2130301,2131468,2132747,2133925,2135271,2136360,2137797,2138983,2140227,2141405,2142894,2144387,2145947,2146986,2148372,2149877,2151413,2152766,2154265,2155789,2156958,2158463,2159801,2161287,2162768,2164287,2165485,2166831,2168185,2169389,2170853,2172411,2173925,2175008,2176414,2177850,2179057,2180433,2181841,2183227,2184387,2185776,2187261,2188851,2190027,2191404,2192618,2194198,2195809,2197285,2198865,2200095,2201606,2202815,2204281,2205379,2206910,2208415,2209925,2211448,2212589,2214037,2215226,2216703,2217552,2218848,2220200,2221507,2222914,2224209,2225655,2227047,2228526,2229991,2231429,2232841,2234432,2235822,2237271,2238504,2240061,2240770,2242052,2243603,2245155,2246504,2247955,2248678,2250070,2251614,2253239,2254426,2255895,2257412,2258962,2260270,2261792,2263016,2264507,2265985,2267620,2269250,2270561,2271894,2273295,2274715,2276034,2277345,2278829,2280301,2281747,2283037,2284589,2286134,2287466,2288817,2290081,2291072,2292397,2293653,2294604,2295833,2296911,2298069,2298881,2299717,2300812,2301847,2303044,2304377,2305254,2306180,2307056,2308123,2309308,2310538,2311919,2313073,2314522,2315719,2317079,2318332,2319756,2320877,2321975,2323371,2324510,2325939,2327379,2328866,2330256,2331352,2332674,2334106,2335443,2336946,2338447,2339959,2341432,2342986,2344515,2345770,2347277,2348824,2350263,2351784,2353410,2354819,2356280,2357787,2359258,2360760,2362122,2363635,2365110,2366705,2368106,2369771,2371272,2371880,2373205,2374719,2376311,2377715,2379315,2380886,2381982,2383411,2384997,2386436,2387970,2389396,2390767,2392116,2393625,2395151,2396691,2398149,2399752,2401141,2402560,2404088,2405521,2406872,2408511,2409992,2411343,2412495,2413618,2414497,2415956,2417482,2419038,2420439,2422061,2423597,2424914,2426480,2428170,2430007,2431849,2433563,2435433,2437306,2439159,2440677,2442270,2443508,2444874,2446032,2447563,2448760,2450106,2451679,2453049,2454569,2456116,2457633,2458995,2460218,2461428,2462848,2464345,2465817,2467339,2468786,2470241,2471742,2473152,2474545,2475781,2477194,2478734,2480254,2481805,2483234,2484727,2485848,2486651,2487901,2489243,2490675,2492024,2493790,2495182,2496477,2498010,2499488,2500999,2502465,2503854,2505127,2506382,2507820,2509327,2510824,2512368,2513926,2515396,2516896,2518458,2519821,2521182,2522746,2524288,2
525671,2526915,2528246,2529016,2530220,2531673,2533236,2534631,2536246,2537897,2539218,2540632,2542121,2543871,2545671,2547499,2549263,2551046,2552869,2554611,2555535,2556228,2557697,2559193,2560601,2561997,2563504,2564951,2566349,2567623,2568691,2569486,2570820,2571983,2573378,2574508,2575757,2577126,2578644,2580155,2581682,2583072,2584577,2586242,2587656,2589020,2590571,2591925,2593244,2594399,2595796,2596971,2597977,2598966,2599827,2600721,2601863,2602934,2603861,2604726,2605866,2606815,2607725,2608474,2609451,2610540,2611528,2612403,2613151,2614145,2615268,2616793,2617729,2618724,2619567,2620428,2621230,2622001,2623699,2625525,2627227,2627567,2627592,2627617,2627642,2627667,2627692,2627717],sizes:[1338,1187,1552,1605,1536,1588,1578,1575,1563,1569,1587,1774,1524,1242,1444,1397,1329,1205,1249,1370,1356,1396,1492,1017,1136,1447,1132,1419,1411,1345,1365,1105,1229,1067,1029,1139,1307,1187,1362,1399,1421,1001,1300,1006,1111,1309,1219,1302,988,1108,1210,849,864,1150,1187,1380,1308,1297,1128,1260,1311,1288,1426,1309,1396,1159,1310,1338,1317,1363,1226,1244,951,934,1231,1114,1288,1239,1216,1282,1446,1427,1040,1290,1004,1231,1069,1409,1245,1242,886,989,1138,839,1039,1242,1443,1400,1113,1271,1391,1438,1351,1495,1533,1039,1018,1247,1231,1365,1187,1239,1073,1089,1331,1229,1261,1208,1177,1279,1303,1425,1244,1200,1533,1149,872,1126,1075,1206,1372,1097,1460,1216,1312,1078,1033,1206,1568,1258,1273,1254,1368,1201,876,1421,1231,1310,1122,1159,1374,1389,1338,1440,1438,1078,1082,1318,1187,1342,1067,1081,1102,902,1336,1145,1006,1267,1134,1150,1253,1219,1225,1415,1219,1212,1059,1379,947,1332,1159,1245,1116,1320,1118,1360,997,919,1165,1504,1247,1227,1170,1348,1161,797,1630,1393,1412,1421,1469,1447,1437,1402,1586,1311,1500,1464,1471,1527,1244,1501,1531,1479,1242,1253,1372,1474,1298,1397,1106,1627,1635,1313,1502,1498,1484,1637,1378,1575,1609,1504,1392,1515,1676,1297,1357,1312,1089,1392,1227,1301,1279,1259,387,790,722,1301,1306,1130,1106,1191,1296,1280,1101,1103,1255,1275,1275,1472,1551,1372,1325,1377,1421,1369,1551,1405,1563,1251,1462,1391,1286,1555,1515,1446,1528,1357,1332,1454,1056,1242,1546,1191,1470,1076,1433,1390,1543,1378,1416,1563,1422,1148,1340,1306,1343,1432,1211,1548,1132,1473,1153,938,1241,1388,1410,1443,1346,1597,1542,1578,1435,1281,1312,1268,1413,1394,1368,1235,1131,1401,1332,1295,1366,1265,1299,1439,1225,1463,1565,1590,1520,1438,1417,1469,1467,1582,1639,1520,1252,1215,1322,1255,1310,1219,1339,1461,1005,1183,1020,1528,1227,1004,1208,1051,1613,1418,1688,1472,1342,1347,1048,1419,1477,1437,1426,1274,1486,1382,1295,1401,1359,1049,1415,1436,1566,1258,1437,1196,1529,1483,1551,1608,1311,1442,1532,1423,1365,1358,1405,1400,1436,1282,1433,1550,1538,1420,1503,1201,1520,1429,1513,1180,1464,1549,1395,1447,1562,1509,1511,1551,1610,1692,1562,1521,1344,1536,1415,1566,1366,1509,1558,1401,1405,1724,1469,1501,1425,1605,1650,1485,1467,1424,1516,1496,1549,1531,1414,1491,1588,1669,1639,1590,1546,1617,1409,1278,1479,1536,1256,1487,1484,1631,1578,1356,1235,1577,1001,1399,1547,1500,1463,1438,1447,1253,1223,1028,1236,1362,1132,1477,1508,1396,1513,1471,1330,1317,1358,1157,1467,1445,1479,1226,1398,1451,1507,1253,1326,1131,1160,1390,1369,1112,1517,1573,1367,1184,1227,1296,1153,1034,1435,1603,1570,1585,1155,1171,1136,891,1198,911,1149,1218,1204,1357,817,1246,944,952,1078,1126,1297,1298,1102,1118,1312,1244,1213,1233,1179,1529,1542,1410,1163,1336,1333,1371,1182,1374,1312,1494,1062,1392,1211,983,1348,1020,1327,1454,1323,1280,1131,1343,1509,1130,1511,1388,1346,1603,1381,1514,1372,1426,1283,1428,1425,1398,1401,1588,1306,1547,1385,1428
,1574,1398,1500,1332,1488,1404,1010,1425,1158,1465,1387,1351,1431,1538,1581,1138,1546,1358,1499,1473,1486,1490,1562,1508,1341,1530,1641,1188,1363,1247,1071,1365,1228,1270,1265,1416,483,910,466,1447,1284,1067,1178,1135,1292,1304,1074,1183,1100,1247,1264,1397,1559,1540,1258,1371,1225,1501,1263,1496,1273,1502,1011,1481,1334,1334,1536,1504,1418,1519,1369,1282,1401,1084,1241,1514,1098,1471,1046,1417,1365,1540,1397,1295,1526,1445,1154,1256,1381,1315,1324,1243,1409,1113,1249,1348,1215,622,1260,1341,1448,1358,1272,1549,1522,1571,1427,1247,1259,1111,1368,1374,1337,1317,1098,1233,1188,1218,1394,1197,1163,1263,1442,1261,1459,1539,1578,1466,1372,1435,1449,1289,1467,1516,1591,1297,1202,1200,1391,1251,1220,1335,1258,1392,1114,1009,1114,1402,1009,1133,1186,1129,1656,1406,1638,1397,1350,1224,977,1394,1431,1420,1485,1162,1345,1498,1043,1300,1234,1273,1164,1455,1438,1363,1239,1478,1063,1469,1490,1532,1634,1209,1333,1457,1372,1409,1441,1406,1427,1424,1197,1390,1540,1552,1322,1402,1266,1519,1480,1487,1232,1529,1400,1426,1228,1522,1530,1489,1419,1637,1637,1696,1412,1543,1447,1442,1514,1532,1332,1511,1394,1492,1635,1725,1423,1403,1481,1651,1525,1389,1303,1557,1462,1487,1374,1481,1543,1475,1604,1639,1641,1613,1556,1585,1321,1263,1490,1526,1274,1459,1454,1623,1544,1297,1235,1559,998,1283,1490,1394,1346,1429,1445,1384,1314,1278,1030,1225,1280,1330,1317,1513,1312,1501,1563,1252,1228,1389,1207,1523,1419,1472,1194,1180,1380,1553,1253,1210,1252,1038,1235,1088,1106,1216,1482,1527,1413,1058,1209,1271,1170,1010,1419,1582,1611,1549,1353,1422,994,1090,1078,918,1152,901,1063,1147,1200,1319,814,1231,926,962,1081,1118,1283,1328,1055,1111,1365,1213,1224,1228,1187,1536,1400,1468,1034,1212,1407,1295,1064,1295,1337,1404,1559,1222,1106,1169,939,1239,973,1198,1545,1016,1395,1065,1418,1343,1173,1505,1312,1305,1572,1294,1399,1391,1147,1509,1480,1340,1567,1475,1509,1383,1377,1345,1609,1375,1548,1430,1566,1371,1504,1367,1550,1468,1330,1399,1586,1392,1301,1664,1594,1528,1248,1513,1456,1549,1419,1652,1537,1545,1372,1533,1574,1549,1351,1425,1078,861,1529,1261,1291,1227,1107,1420,492,918,453,1407,1224,1126,1102,1168,1244,1223,1038,1082,1144,1231,1458,1454,1569,1312,1361,1379,1507,1390,1448,1550,1290,1438,1236,1576,1547,1488,1595,1297,1418,966,1512,1422,1436,1018,770,1604,1687,1713,1411,1564,1399,1537,1430,1575,1573,1374,1613,1587,1411,1511,1684,1524,1364,1483,1592,1602,1600,1603,1383,1324,1400,1486,1283,1513,1396,1589,1496,1366,1568,1192,1358,1359,1529,1377,1384,1133,1625,1612,1680,1556,1520,1147,1473,1510,1542,1626,1224,1546,1313,1575,1428,1077,1176,1298,1186,1369,1557,1637,1461,1553,1536,1510,1212,958,1093,1181,1454,1343,1480,1524,1407,1366,1450,1417,1432,1514,1291,1400,1633,1444,1357,1426,1300,1394,1391,1440,1399,1501,1435,1143,1374,1161,1310,1274,1337,1254,1306,1370,1424,1647,1534,1536,1442,1367,1388,1376,1546,1655,1525,1407,1239,1147,1188,1290,1374,1418,1538,1208,1170,1131,1511,1468,1245,1002,1150,1124,1158,1444,1062,1123,1062,1144,1259,1619,1516,1642,1352,1669,1548,1306,1547,1061,1144,800,1070,1150,1488,1394,1641,1125,1308,1343,1525,1620,1250,1232,1194,1368,1355,1264,1264,1230,1431,1395,1512,1098,1415,1542,1563,1312,1473,1528,1214,1532,1424,1460,1472,1562,1188,1305,1372,1394,1472,1573,1452,1145,1542,1337,1450,1295,1366,1301,1440,1522,1593,1311,1364,1295,1628,1631,1522,1583,1302,1544,1247,1493,1061,1498,1544,1640,1368,1290,1228,1557,977,1153,1494,1484,1367,1351,1629,1309,1556,1405,1468,1467,1621,1465,1389,1309,1507,958,1318,1603,1485,1489,1338,953,1532,1666,1391,1209,1413,1589,1498,1529,1354,1560,1474,1588,1627,1400,1429,1469,1456,1345
,1333,1499,1451,1455,1327,1539,1574,1218,1039,1300,1276,977,1248,1077,1214,886,919,1271,1051,1148,1241,878,963,982,887,1243,1198,1445,1318,1129,1256,1472,1525,1291,1233,1391,941,1395,1312,1339,1118,1527,1474,1341,1540,1462,1423,1317,1276,1333,1568,1432,1453,1237,1356,1372,1538,1446,1506,1479,1311,1496,1381,1267,1422,1308,1586,1640,1267,1517,1455,1461,1582,1361,1537,1578,1505,1332,1478,1636,1430,1364,1401,936,884,1469,1237,1285,1234,1114,1359,471,929,506,1380,1194,1127,1092,1157,1254,1225,1024,1081,1113,1281,1416,1479,1592,1265,1173,1287,1389,1345,1476,1446,1413,1454,1413,1538,1526,1487,1598,1376,1525,743,1268,1532,1227,838,848,1181,1598,1704,1554,1611,1453,1511,1560,1385,1594,1550,1356,1568,1540,1419,1484,1714,1489,1427,1387,1544,1578,1593,1565,1344,1278,1340,1458,1275,1347,1351,1465,1460,1644,1577,1193,1348,1078,1458,1342,1275,1340,1151,1556,1604,1652,1521,1466,1172,1433,1434,1468,1591,1224,1451,1123,1492,1567,1108,988,1324,1321,1146,1538,1550,1464,1517,1186,1517,1186,1093,1159,1055,1088,1481,1303,1450,1509,1370,1331,1447,1240,1455,1388,1356,1455,1539,1329,1430,1508,1389,1230,1383,1378,1387,1401,1406,1241,1244,1235,1229,1208,1359,1145,1323,1352,1416,1532,1511,1523,1511,1420,1426,1358,1453,1487,1698,1522,1061,999,1422,1166,1456,1298,1330,1356,1229,1154,1150,1524,1328,1430,1044,1068,1105,1114,1209,1411,1100,1032,1190,1170,1273,1411,1457,1453,1451,1508,1585,1495,1405,1204,1196,942,741,1081,1294,1444,1531,1502,1054,1306,1245,1434,1561,1167,1279,1178,1346,1089,1437,1186,1244,1178,1489,1493,1560,1039,1386,1505,1536,1353,1499,1524,1169,1505,1338,1486,1481,1519,1198,1346,1354,1204,1464,1558,1514,1083,1406,1436,1207,1376,1408,1386,1160,1389,1485,1590,1176,1377,1214,1580,1611,1476,1580,1230,1511,1209,1466,1098,1531,1505,1510,1523,1141,1448,1189,1477,849,1296,1352,1307,1407,1295,1446,1392,1479,1465,1438,1412,1591,1390,1449,1233,1557,709,1282,1551,1552,1349,1451,723,1392,1544,1625,1187,1469,1517,1550,1308,1522,1224,1491,1478,1635,1630,1311,1333,1401,1420,1319,1311,1484,1472,1446,1290,1552,1545,1332,1351,1264,991,1325,1256,951,1229,1078,1158,812,836,1095,1035,1197,1333,877,926,876,1067,1185,1230,1381,1154,1449,1197,1360,1253,1424,1121,1098,1396,1139,1429,1440,1487,1390,1096,1322,1432,1337,1503,1501,1512,1473,1554,1529,1255,1507,1547,1439,1521,1626,1409,1461,1507,1471,1502,1362,1513,1475,1595,1401,1665,1501,608,1325,1514,1592,1404,1600,1571,1096,1429,1586,1439,1534,1426,1371,1349,1509,1526,1540,1458,1603,1389,1419,1528,1433,1351,1639,1481,1351,1152,1123,879,1459,1526,1556,1401,1622,1536,1317,1566,1690,1837,1842,1714,1870,1873,1853,1518,1593,1238,1366,1158,1531,1197,1346,1573,1370,1520,1547,1517,1362,1223,1210,1420,1497,1472,1522,1447,1455,1501,1410,1393,1236,1413,1540,1520,1551,1429,1493,1121,803,1250,1342,1432,1349,1766,1392,1295,1533,1478,1511,1466,1389,1273,1255,1438,1507,1497,1544,1558,1470,1500,1562,1363,1361,1564,1542,1383,1244,1331,770,1204,1453,1563,1395,1615,1651,1321,1414,1489,1750,1800,1828,1764,1783,1823,1742,924,693,1469,1496,1408,1396,1507,1447,1398,1274,1068,795,1334,1163,1395,1130,1249,1369,1518,1511,1527,1390,1505,1665,1414,1364,1551,1354,1319,1155,1397,1175,1006,989,861,894,1142,1071,927,865,1140,949,910,749,977,1089,988,875,748,994,1123,1525,936,995,843,861,802,771,1698,1826,1702,340,25,25,25,25,25,25,22],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]};compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with -s LZ4=1 ?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_CLAPACK.data")}Module["addRunDependency"]("datafile_CLAPACK.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/clapack_all.so",start:0,end:4008996,audio:0}],remote_package_size:2631835,package_uuid:"5bb7b55c-7d5c-4c79-933b-df239114a103"})})(); \ No newline at end of file diff --git a/spaces/pyodide-demo/self-hosted/future-tests.js b/spaces/pyodide-demo/self-hosted/future-tests.js deleted file mode 100644 index 2f247731c21904b88a3f4aeefc980cf7e8f63196..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/future-tests.js +++ /dev/null @@ -1 +0,0 @@ -var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="future-tests.data";var REMOTE_PACKAGE_BASE="future-tests.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... 
("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","future",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/future","backports",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/future/backports","test",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/future","moves",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/future/moves","test",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/future","tests",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var compressedData={data:null,cachedOffset:89859,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1388,2531,3496,4716,6094,7236,8458,9701,11153,12378,13638,15069,16227,17483,18708,19882,21448,23091,24345,25781,27230,28527,29816,31168,32448,33750,34846,35993,37198,38546,39670,40937,42187,43353,44666,45835,47214,48653,49694,50855,52008,53323,54529,55746,57640,59550,61193,63038,64904,66225,67833,69703,71541,73490,75205,77056,79054,80190,81277,82243,83329,84524,85646,86501,87528,88711],sizes:[1388,1143,965,1220,1378,1142,1222,1243,1452,1225,1260,1431,1158,1256,1225,1174,1566,1643,1254,1436,1449,1297,1289,1352,1280,1302,1096,1147,1205,1348,1124,1267,1250,1166,1313,1169,1379,1439,1041,1161,1153,1315,1206,1217,1894,1910,1643,1845,1866,1321,1608,1870,1838,1949,1715,1851,1998,1136,1087,966,1086,1195,1122,855,1027,1183,1148],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]};compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with -s LZ4=1 
?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_future-tests.data")}Module["addRunDependency"]("datafile_future-tests.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/site-packages/future/backports/test/__init__.py",start:0,end:264,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/pystone.py",start:264,end:7691,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/ssl_servers.py",start:7691,end:14900,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/support.py",start:14900,end:85781,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/badcert.pem",start:85781,end:87709,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/badkey.pem",start:87709,end:89871,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/dh512.pem",start:89871,end:90273,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/https_svn_python_org_root.pem",start:90273,end:92842,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/keycert.passwd.pem",start:92842,end:94672,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/keycert.pem",start:94672,end:96455,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/keycert2.pem",start:96455,end:98250,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/nokia.pem",start:98250,end:100173,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/nullbytecert.pem",start:100173,end:105608,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/nullcert.pem",start:105608,end:105608,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/sha256.pem",start:105608,end:113952,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/ssl_cert.pem",start:113952,end:114819,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/ssl_key.passwd.pem",start:114819,end:115782,audio:0},{filename:"/lib/python3.9/site-packages/future/backports/test/ssl_key.pem",start:115782,end:116698,audio:0},{filename:"/lib/python3.9/site-packages/future/moves/test/__init__.py",start:116698,end:116808,audio:0},{filename:"/lib/python3.9/site-packages/future/moves/test/support.py",start:116808,end:117067,audio:0},{filename:"/lib/python3.9/site-packages/future/tests/__init__.py",start:117067,end:117067,audio:0},{filename:"/lib/python3.9/site-packages/future/tests/base.py",start:117067,end:137023,audio:0}],remote_package_size:93955,package_uuid:"4118d416-f539-41f0-8f57-93409ed98292"})})(); \ No newline at end of file diff --git a/spaces/qanastek/French-Part-Of-Speech-Tagging/gradio_app.py b/spaces/qanastek/French-Part-Of-Speech-Tagging/gradio_app.py deleted file mode 100644 index 7e7b7e1487189b3b40ef6463e61a51c3e854ba0f..0000000000000000000000000000000000000000 --- a/spaces/qanastek/French-Part-Of-Speech-Tagging/gradio_app.py +++ /dev/null @@ -1,29 +0,0 @@ -import gradio as gr - -from flair.data import Sentence -from flair.models import SequenceTagger - -# Load the model -model = SequenceTagger.load("qanastek/pos-french") - -def getPartOfSpeechFR(content): - - # 
George Washington est allé à Washington - - sentence = Sentence(content) - - # predict tags - model.predict(sentence) - - # print predicted pos tags - res = sentence.to_tagged_string() - - return res - -iface = gr.Interface( - title="🥖 French Part Of Speech Tagging", - fn=getPartOfSpeechFR, - inputs="textbox", - outputs="textbox", -) -iface.launch() \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Epson L110 L210 L300 L355 (EURO CIS) Ver.1.0.0 Service Adjustment Program.59 FREE.md b/spaces/quidiaMuxgu/Expedit-SAM/Epson L110 L210 L300 L355 (EURO CIS) Ver.1.0.0 Service Adjustment Program.59 FREE.md deleted file mode 100644 index 796708d2c4f2b1662f4afc4585996fe6b0174792..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Epson L110 L210 L300 L355 (EURO CIS) Ver.1.0.0 Service Adjustment Program.59 FREE.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Epson L110, L210, L300, L355 (EURO, CIS) Ver.1.0.0 Service Adjustment Program.59


        DOWNLOAD 🔗 https://geags.com/2uCqQc



        -
        -Epson L110, L210, L300, L355 (EURO, CIS) Ver.1.0.0 Service ... Epson T50, T59 Service Adjustment Program Epson T50, T59 Service ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/qushui/bing/README.md b/spaces/qushui/bing/README.md deleted file mode 100644 index 33c08fe185dd5d12049610a3bad246ecc3f59246..0000000000000000000000000000000000000000 --- a/spaces/qushui/bing/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Bing -emoji: 🦀 -colorFrom: blue -colorTo: green -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/r3gm/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers_537227KB.py b/spaces/r3gm/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers_537227KB.py deleted file mode 100644 index 9b127bc6427f5c60c8cf85603a3d8a093c3501c4..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers_537227KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = 
SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/data/loaders/augmentors/heatmaps.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/data/loaders/augmentors/heatmaps.py deleted file mode 100644 index 3cc3ede432e5ca3ad2d0fd8a5972426bf59e9c16..0000000000000000000000000000000000000000 --- a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/data/loaders/augmentors/heatmaps.py +++ /dev/null @@ -1,39 +0,0 @@ -import numpy as np - - -class Heatmaps: - - def __init__(self, num_maps, map_size, sigma, stride=1, norm=True): - self.num_maps = num_maps - self.sigma = sigma - self.double_sigma_pw2 = 2*sigma*sigma - self.doublepi_sigma_pw2 = self.double_sigma_pw2 * np.pi - self.stride = stride - self.norm = norm - - if isinstance(map_size, (tuple, list)): - self.width = map_size[0] - self.height = map_size[1] - else: - self.width = map_size - self.height = map_size - - grid_x = np.arange(self.width) * stride + stride / 2 - 0.5 - self.grid_x = np.repeat(grid_x.reshape(1, self.width), self.num_maps, axis=0) - grid_y = np.arange(self.height) * stride + stride / 2 - 0.5 - self.grid_y = np.repeat(grid_y.reshape(1, self.height), self.num_maps, axis=0) - - def __call__(self, sample): - landmarks = sample['landmarks'] - landmarks = landmarks[-self.num_maps:] - - # Heatmap generation - exp_x = np.exp(-(self.grid_x - landmarks[:, 0].reshape(-1, 1)) ** 2 / self.double_sigma_pw2) - exp_y = np.exp(-(self.grid_y - landmarks[:, 1].reshape(-1, 1)) ** 2 / self.double_sigma_pw2) - heatmaps = np.matmul(exp_y.reshape(self.num_maps, self.height, 1), exp_x.reshape(self.num_maps, 1, self.width)) - - if self.norm: - heatmaps = heatmaps/self.doublepi_sigma_pw2 - - sample['heatmap2D'] = heatmaps - return sample diff --git a/spaces/radames/gradio_window_localStorage/README.md b/spaces/radames/gradio_window_localStorage/README.md deleted file mode 100644 index c6d51db886821b53f812e9128471b25842327ee8..0000000000000000000000000000000000000000 --- a/spaces/radames/gradio_window_localStorage/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Gradio Window LocalStorage -emoji: 🫙 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/raedeXanto/academic-chatgpt-beta/After Effects Wedding Title Projects Free Download [BETTER].md b/spaces/raedeXanto/academic-chatgpt-beta/After Effects Wedding Title Projects Free Download [BETTER].md deleted file mode 100644 index cca47593ac687f4a9a57844aaa5d14d53abe8ee7..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/After Effects Wedding Title Projects Free Download [BETTER].md +++ /dev/null @@ -1,29 +0,0 @@ - -

        How to Create Stunning Wedding Titles with Free After Effects Templates

        -

        If you are looking for a way to add some flair and elegance to your wedding video, you might want to consider using After Effects templates. After Effects is a powerful software that allows you to create stunning motion graphics and visual effects for your videos. You can use it to create amazing wedding titles that will impress your clients and guests.

        -

        But you don't have to be an expert in After Effects to create beautiful wedding titles. There are many free After Effects templates available online that you can download and customize for your own projects. These templates are professionally designed and easy to use. You just need to replace the text and images with your own, and you are ready to go.

        -

        After Effects Wedding Title Projects Free Download


        DOWNLOADhttps://tinourl.com/2uL0Dc



        -

        In this article, we will show you some of the best free After Effects wedding templates that you can use for your wedding titles. We will also give you some tips on how to edit them and make them fit your style and theme.

        -

        Best Free After Effects Wedding Templates

        -

        There are many websites that offer free After Effects templates for various purposes, such as intros, slideshows, logos, lower thirds, and more. However, not all of them are suitable for wedding videos. You need to find templates that have a romantic, elegant, and classy look that matches your wedding theme.

        -

        One of the best websites that we recommend for finding free After Effects wedding templates is Theme Junkie. Theme Junkie is a platform that provides high-quality WordPress themes, plugins, fonts, graphics, and more. They also have a collection of over 45 free After Effects wedding templates that you can download and use for your projects.

        -

        Some of the features of these templates are:

        -
          -
        • They are compatible with After Effects CS6 and above.
        • -
        • They are fully customizable and easy to edit.
        • -
        • They have high-resolution and fast render times.
        • -
        • They have various styles and themes, such as vintage, modern, floral, minimal, etc.
        • -
        • They include placeholders for text, images, videos, logos, etc.
        • -
        -

        To download these templates, you just need to visit their website and click on the download button. You will get a ZIP file that contains the template files and a readme file with instructions on how to use them.

        -

        How to Edit Free After Effects Wedding Templates

        -

        Once you have downloaded the template that you like, you need to unzip it and open it in After Effects. You will see a folder structure that contains the main composition and other assets such as images, videos, fonts, etc. You need to import these assets into your project panel if they are not already there.

        -

        -

        To edit the template, you need to open the main composition and double-click on the text or image layers that you want to change. You can use the character panel and the align panel to adjust the font style, size, color, alignment, etc. You can also use the effects panel and the timeline panel to add or modify any animations or transitions.

        -

        If you want to change the background or add your own footage, you can drag and drop your files into the project panel and replace the existing ones. You can also use masks, shapes, or solids to create different effects or overlays.

        -

        After you have finished editing the template, you need to export it as a video file. You can do this by going to File > Export > Add to Render Queue. You can choose the format and settings that suit your needs. Then click on Render and wait for it to finish.

        -

        Conclusion

        -

        Creating stunning wedding titles with free After Effects templates is a great way to save time and money while adding some flair and elegance to your wedding video. You can find many free After Effects wedding templates online that you can download and customize for your own projects.

        -

        We hope this article has helped you learn how to use free After Effects wedding templates for your wedding titles. If you have any questions or suggestions, feel free to leave a comment below.

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Concise Mathematics Class 10 PDF Download A Complete Guide for ICSE Students.md b/spaces/raedeXanto/academic-chatgpt-beta/Concise Mathematics Class 10 PDF Download A Complete Guide for ICSE Students.md deleted file mode 100644 index 38b62df1a7b47f1d8aa48edb4231df63344a38f2..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Concise Mathematics Class 10 PDF Download A Complete Guide for ICSE Students.md +++ /dev/null @@ -1,118 +0,0 @@ -
        -

        Concise Mathematics Class 10 PDF Download

        -

        Mathematics is a subject that requires a lot of practice and understanding. It can be challenging for some students to grasp the concepts and solve the problems. If you are one of those students who are looking for a reliable and comprehensive book for your ICSE Class 10 Mathematics exam preparation, then you have come to the right place. In this article, we will tell you how to download Concise Mathematics Class 10 PDF for free and what are the benefits of using this book.

        -

        Introduction

        -

        Concise Mathematics is a series of books published by Selina Publishers for ICSE students of classes 6 to 10. These books are designed to cover the ICSE syllabus and help students prepare for their exams. The books are written by expert authors who have years of experience in teaching and writing mathematics books.

        -

        concise mathematics class 10 pdf download


        Download Filehttps://tinourl.com/2uKZy1



        -

        What is Concise Mathematics?

        -

        Concise Mathematics is a book that explains the mathematical concepts in a simple and concise manner. The book covers all the topics that are prescribed by the ICSE board for Class 10 Mathematics. Some of the topics are:

        -
          -
        • Commercial Arithmetic
        • -
        • Algebra
        • -
        • Geometry
        • -
        • Mensuration
        • -
        • Trigonometry
        • -
        • Coordinate Geometry
        • -
        • Statistics
        • -
        • Probability
        • -
        -

        The book also includes solved examples, practice questions, exercises, revision tests, and model papers to help students master the subject.

        -

        Why should you study Concise Mathematics?

        -

        Concise Mathematics is a book that will help you in many ways. Here are some reasons why you should study Concise Mathematics:

        -
          -
        • It will help you understand the concepts clearly and apply them in solving problems.
        • -
        • It will help you improve your logical thinking and analytical skills.
        • -
        • It will help you score well in your ICSE Class 10 Mathematics exam.
        • -
        • It will help you prepare for competitive exams like JEE, NEET, etc.
        • -
        • It will help you develop an interest in mathematics and explore its applications in various fields.
        • -
        -

        How to download Concise Mathematics Class 10 PDF?

        -

        If you want to download Concise Mathematics Class 10 PDF for free, you can follow these simple steps:

        -

        -

        Step 1: Visit the official website of Selina Publishers

        -

        The first step is to visit the official website of Selina Publishers, which is https://www.selina.com/. This is the best source to get the authentic and updated version of the book.

        -

        Step 2: Select the subject and class

        -

        The next step is to select the subject and class from the drop-down menu on the homepage. You need to select "Mathematics" as the subject and "Class 10" as the class.

        -

        Step 3: Choose the chapter and click on the download link

        -

        The third step is to choose the chapter that you want to download from the list of chapters displayed on the screen. You can also use the search bar to find the chapter by its name or number. Once you find the chapter, click on the download link below it.

        -

        Step 4: Save the PDF file on your device

        -

        The final step is to save the PDF file on your device. You can choose a location where you want to save it, such as your desktop, downloads folder, or any other folder. You can also rename the file if you want. After saving it, you can open it with any PDF reader software or app.

        -

        Benefits of Concise Mathematics Class 10 PDF

        -

        There are many benefits of using Concise Mathematics Class 10 PDF for your exam preparation. Some of them are:

        -

        Easy to understand language and concepts

        -

        The book uses easy to understand language and concepts that make it suitable for all types of learners. The book explains each topic in a step-by-step manner with relevant examples and diagrams. The book also avoids unnecessary jargon and technical terms that might confuse or bore the students.

        -

        Ample practice questions and exercises

        -

        The book provides ample practice questions and exercises at the end of each chapter. These questions are designed to test your knowledge and understanding of the topic. They also help you revise and reinforce what you have learned. The questions are based on different levels of difficulty and types of problems. The book also provides solutions for all the questions and exercises.

        -

        Chapter-wise summaries and glossary

        -

        The book provides chapter-wise summaries that give a quick overview of the important concepts and points discussed in each chapter. These summaries can be used by students during their revision or before their exams for quick preparation. The book also provides a glossary of important terms and concepts that helps students in understanding the subject matter better.

        -

        Conclusion

        -

        In conclusion, Concise Mathematics Class 10 PDF is a great resource for ICSE students who want to ace their mathematics exam. The book covers all the topics as per the syllabus and provides clear explanations, examples, exercises, summaries, and glossary. The book can be downloaded for free from Selina Publishers' website by following some simple steps. The book can be accessed anytime and anywhere on any device with a PDF reader software or app.

        -

        FAQs

        -
          -
        1. What is ICSE?
          ICSE stands for Indian Certificate of Secondary Education. It is an examination conducted by CISCE (Council for Indian School Certificate Examinations) for class 10 students in India.
        2. -
        3. What is Selina Publishers?
          Selina Publishers is a well-known brand in publishing educational books for ICSE students of classes 6 to 10. They publish books on subjects like Physics, Chemistry, Biology, and Mathematics.
        4. -
        5. What is Concise Mathematics?
          Concise Mathematics is a series of books published by Selina Publishers for ICSE students of classes 6 to 10. These books explain mathematical concepts in a simple and concise manner.
        6. -
        7. How can I download Concise Mathematics Class 10 PDF?
          You can download Concise Mathematics Class 10 PDF from Selina Publishers' website by following these steps:
          -- Visit https://www.selina.com/.
          -- Select "Mathematics" as subject and "Class 10" as class.
          -- Choose the chapter that you want to download.
          -- Click on the download link below it.
          -- Save the PDF file on your device.
        8. -
        9. What are some benefits of using Concise Mathematics Class 10 PDF?
          Some benefits of using Concise Mathematics Class 10 PDF are:
          -- Easy to understand language and concepts.
          -- Ample practice questions and exercises.
          -- Chapter-wise summaries and glossary.
          -- Free access anytime and anywhere.
        10. -
        -

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Dreambox Control Center (DCC) for Enigma2 - v 1.20 full version How to Solve Common Problems and Errors.md b/spaces/raedeXanto/academic-chatgpt-beta/Dreambox Control Center (DCC) for Enigma2 - v 1.20 full version How to Solve Common Problems and Errors.md deleted file mode 100644 index 3dcb49453ee51df93d8d915768893933879ef508..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Dreambox Control Center (DCC) for Enigma2 - v 1.20 full version How to Solve Common Problems and Errors.md +++ /dev/null @@ -1,139 +0,0 @@ - -

        Dreambox Control Center (DCC) for Enigma2 - v 1.20 full version

        -

If you own a Dreambox satellite receiver that runs on the Enigma2 operating system, you might be looking for a way to manage it over the network. In this article, we will introduce you to Dreambox Control Center (DCC) for Enigma2 - v 1.20 full version, a free program that lets you control your Dreambox remotely from your computer. We will also show you how to install and use DCC, and how to troubleshoot some common issues that you might encounter.

        -

        What is Dreambox Control Center (DCC)?

        -

        A brief introduction to DCC

        -

        Dreambox Control Center (DCC) is a software program developed by BernyR that is intended exclusively for handling Dreambox satellite receivers over the network. It allows you to perform various tasks on your Dreambox, such as:

        -

        Dreambox Control Center (DCC) for Enigma2 - v 1.20 full version


        Download File » https://tinourl.com/2uL1Wv



        -
          -
        • Viewing and editing settings
        • -
        • Uploading and downloading files
        • -
        • Executing scripts
        • -
        • Managing channels and bouquets
        • -
        • Streaming live TV and recordings
        • -
        • Rebooting and shutting down
        • -
        -

        DCC supports both Enigma1 and Enigma2 operating systems, but in this article we will focus on the version for Enigma2, which is DCC-E2 v 1.20 full version. This version was released on May 12th, 2019 and it is compatible with Windows XP, Vista, 7, 8, and 10.

        -

        The main features of DCC

        -

        DCC has a user-friendly interface that consists of four main tabs: FTP, Telnet, WebIf, and ScreenShot. Each tab has its own functions and options that we will explain in more detail later. Here are some of the main features of DCC:

        -
          -
        • FTP: This tab allows you to access the file system of your Dreambox and transfer files between your computer and your Dreambox. You can also create, delete, rename, copy, move, or edit files and folders on your Dreambox.
        • -
        • Telnet: This tab allows you to execute commands on your Dreambox using the Telnet protocol. You can also view the output of the commands in a separate window.
        • -
        • WebIf: This tab allows you to access the web interface of your Dreambox using your web browser. You can also view the live TV or recordings from your Dreambox in a separate window.
        • -
        • ScreenShot: This tab allows you to capture screenshots of your Dreambox screen and save them on your computer.
        • -
        -

        How to install and use DCC for Enigma2?

        -

        The system requirements and download link for DCC

        -

        To install and use DCC for Enigma2, you need to have the following requirements:

        -
          -
        • A Dreambox satellite receiver that runs on Enigma2 operating system
        • -
        • A computer that runs on Windows XP, Vista, 7, 8, or 10
        • -
        • A network connection between your computer and your Dreambox (either wired or wireless)
        • -
        • A web browser that supports HTML5 (such as Chrome, Firefox, or Edge)
        • -
        -

        You can download DCC-E2 v 1.20 full version from this link: https://dreambox-control-center.soft....com/download/

        -

        The steps to install and configure DCC

        -

        After downloading DCC-E2 v 1.20 full version from the link above, follow these steps to install and configure it:

        -
          -
        1. Extract the zip file to a folder on your computer.
        2. -
        3. Run the brctrcen.exe file as administrator.
        4. -
        5. Select your language from the drop-down menu.
        6. -
        7. Select your connection type from the drop-down menu (either LAN or WLAN).
        8. -
        9. The IP address of your computer will be detected automatically. Enter the IP address of your Dreambox manually or click on Search to find it automatically.
        10. -
        11. Select the equipment of your Dreambox from the drop-down menu (either Hard disk, CF card, or USB stick).
        12. -
        13. Enter your login and password for your Dreambox (the default ones are root and dreambox).
        14. -
        15. The name of an active DreamFlash image will be shown automatically. If you have more than one image installed on your Dreambox, you can select it from the drop-down menu.
        16. -
        17. Click on Connect to establish a connection between your computer and your Dreambox.
        18. -
        -

        The main functions and options of DCC

        -

        Once you have connected your computer and your Dreambox using DCC, you can use the four main tabs (FTP, Telnet, WebIf, and ScreenShot) to perform various tasks on your Dreambox. Here are some examples of what you can do with each tab:

        - -
• FTP – Upload a new channel list to your Dreambox: Navigate to the folder where you have saved the channel list file on your computer (usually .xml or .tv format), drag and drop it into the /etc/enigma2 folder on your Dreambox, then restart Enigma2 or reboot your Dreambox.
• Telnet – Install a new plugin or skin on your Dreambox: Navigate to the folder where you have saved the plugin or skin file on your computer (usually .ipk or .tar.gz format), drag and drop it into the /tmp folder on your Dreambox, then type in the command window: for .ipk files, opkg install /tmp/*.ipk; for .tar.gz files, tar xzvf /tmp/*.tar.gz -C /
• WebIf – Watch your Dreambox in the browser: Open the web interface of your Dreambox and view live TV or recordings in a separate window.
• ScreenShot – Capture the Dreambox screen: Take a snapshot of the current Dreambox screen and display it in a new window. You can also save the screenshot on your computer by clicking on the Save button.
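The FTP and Telnet steps above are the ones people most often want to repeat. If you ever need to run them without opening DCC, the same two operations can be scripted with Python's standard ftplib and telnetlib modules (telnetlib ships with Python up to 3.12). This is only a rough sketch: the IP address, the default root/dreambox login, and the file names are placeholder assumptions, not values taken from DCC itself.

```python
from ftplib import FTP
import telnetlib  # standard library up to Python 3.12

# Placeholder values for illustration only -- adjust to your own box.
DREAMBOX_IP = "192.168.1.50"
USER, PASSWORD = "root", "dreambox"      # DCC's default login
CHANNEL_LIST = "lamedb"                  # hypothetical channel list file
PLUGIN = "example-plugin.ipk"            # hypothetical plugin package

def ftp_upload(local_path: str, remote_dir: str) -> None:
    """Copy a local file into a directory on the Dreambox over FTP."""
    with FTP(DREAMBOX_IP, user=USER, passwd=PASSWORD) as ftp:
        ftp.cwd(remote_dir)
        with open(local_path, "rb") as fh:
            ftp.storbinary("STOR " + local_path.rsplit("/", 1)[-1], fh)

def telnet_command(command: str) -> str:
    """Log in over Telnet, run one command and return its output."""
    tn = telnetlib.Telnet(DREAMBOX_IP, 23, timeout=10)
    tn.read_until(b"login: ", timeout=10)    # prompt text assumed
    tn.write(USER.encode() + b"\n")
    tn.read_until(b"Password: ", timeout=10)
    tn.write(PASSWORD.encode() + b"\n")
    tn.write(command.encode() + b"\nexit\n")
    return tn.read_all().decode(errors="replace")

if __name__ == "__main__":
    ftp_upload(CHANNEL_LIST, "/etc/enigma2")   # channel list
    ftp_upload(PLUGIN, "/tmp")                 # plugin package
    print(telnet_command("opkg install /tmp/" + PLUGIN))
```

As with the FTP tab in DCC, you would still restart Enigma2 or reboot the box afterwards so the new channel list is picked up.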
        -

        How to troubleshoot common issues with DCC?

        -

        The possible causes and solutions for connection problems

        -

        If you have trouble connecting your computer and your Dreambox using DCC, you might encounter one of these error messages:

        -
          -
• Connection refused: This means that the Dreambox actively rejected the connection, which usually happens when the box is still booting or when its FTP/Telnet service is not running. Check that your Dreambox is turned on and connected to the same network as your computer, and that you have entered the correct IP address, login, and password for your Dreambox.
        • -
• Connection timed out: This means that your Dreambox is too slow to respond or that there is a network problem. Try to ping your Dreambox from your computer using the command prompt or terminal. If you get a reply, your network connection is fine; if not, check your network settings and cables. You can also try restarting your router, modem, computer, and Dreambox. A quick port check like the one sketched after this list will also tell you whether the Dreambox answers on the FTP, Telnet, or web ports at all.
        • -
        • Connection reset by peer: This means that your Dreambox has closed the connection unexpectedly. This could be due to a software crash or a power failure. Try to reconnect your Dreambox using DCC or reboot your Dreambox.
        • -
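If none of the messages above points to an obvious cause, it helps to first confirm that the Dreambox answers on the relevant ports at all. The probe below is a minimal sketch using Python's socket module; the IP address is a placeholder, and a successful connection only proves the port is open, not that your login is correct.

```python
import socket

DREAMBOX_IP = "192.168.1.50"   # placeholder -- use your Dreambox's address

def port_open(ip: str, port: int, timeout: float = 3.0) -> bool:
    """Return True if a TCP connection to ip:port can be opened."""
    try:
        with socket.create_connection((ip, port), timeout=timeout):
            return True
    except OSError:
        return False

if __name__ == "__main__":
    for name, port in (("FTP", 21), ("Telnet", 23), ("Web interface", 80)):
        state = "open" if port_open(DREAMBOX_IP, port) else "closed or unreachable"
        print(f"{name} (port {port}): {state}")
```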
        -

        The possible causes and solutions for file transfer problems

        -

        If you have trouble transferring files between your computer and your Dreambox using DCC, you might encounter one of these error messages:

        -

        -
          -
• Permission denied: This means that you don't have the right to access or modify the file or folder on your Dreambox. Check that you have entered the correct login and password for your Dreambox, and that the file or folder has the proper permissions and ownership. You can change them from the Telnet tab with the chmod and chown commands; a short Telnet sketch after this list shows one way to do that.
        • -
        • No space left on device: This means that your Dreambox has run out of storage space. Check how much free space you have on your Dreambox using the FTP tab or the df command. You can free up some space by deleting some unnecessary files or folders on your Dreambox.
        • -
        • File not found: This means that the file or folder you are trying to access does not exist on your Dreambox. Check if you have entered the correct path and name for the file or folder on your Dreambox. Also check if the file or folder has been moved, renamed, or deleted.
        • -
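The first two problems above can be checked in a single Telnet session: df shows how full each partition is, and ls -l, chmod, and chown reveal and fix ownership issues. The sketch below is self-contained and uses the same placeholder address and default login as the earlier example; the file path is purely hypothetical.

```python
import telnetlib  # standard library up to Python 3.12

DREAMBOX_IP = "192.168.1.50"            # placeholder address
USER, PASSWORD = "root", "dreambox"     # DCC's default login
TARGET = "/media/hdd/movie/example.ts"  # hypothetical file with bad permissions

def run(command: str) -> str:
    """Run a single shell command on the Dreambox over Telnet."""
    tn = telnetlib.Telnet(DREAMBOX_IP, 23, timeout=10)
    tn.read_until(b"login: ", timeout=10)    # prompt text assumed
    tn.write(USER.encode() + b"\n")
    tn.read_until(b"Password: ", timeout=10)
    tn.write(PASSWORD.encode() + b"\n")
    tn.write(command.encode() + b"\nexit\n")
    return tn.read_all().decode(errors="replace")

if __name__ == "__main__":
    print(run("df -h"))                       # free space on each partition
    print(run("ls -l " + TARGET))             # current owner and permissions
    print(run("chmod 644 " + TARGET))         # make the file readable
    print(run("chown root:root " + TARGET))   # hand it back to root
```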
        -

        The possible causes and solutions for script execution problems

        -

        If you have trouble executing scripts on your Dreambox using DCC, you might encounter one of these error messages:

        -
          -
        • Syntax error: This means that there is a mistake in the script code that prevents it from running properly. Check if you have entered the correct command and parameters for the script. Also check if the script has the proper format and extension (usually .sh).
        • -
        • Command not found: This means that the script is trying to use a command that is not available on your Dreambox. Check if you have installed the required packages or plugins for the command on your Dreambox. You can use the opkg command to install or update packages.
        • -
        • Operation not permitted: This means that the script is trying to perform an action that is not allowed on your Dreambox. Check if the script has the proper permissions and ownership. You can change them using the Telnet tab or the chmod and chown commands.
        • -
        -

        Conclusion

        -

        In this article, we have introduced you to Dreambox Control Center (DCC) for Enigma2 - v 1.20 full version, a free software that allows you to control your Dreambox remotely from your computer. We have also shown you how to install and use DCC, as well as how to troubleshoot some common issues that you might encounter. We hope you have found this article informative and engaging, and we encourage you to try out DCC for yourself and explore its features and functions.

        -

        FAQs

        -
          -
        1. What is Enigma2?
        2. -

          Enigma2 is an open source operating system that runs on Linux-based satellite receivers such as Dreambox. It allows users to customize their receivers with various plugins, skins, settings, and features.

          -
        3. What is a DreamFlash image?
        4. -

          A DreamFlash image is a backup image of your Enigma2 system that can be stored on a hard disk, CF card, or USB stick on your Dreambox. You can switch between different images using DCC or other tools.

          -
        5. What is a plugin?
        6. -

          A plugin is an add-on program that extends the functionality of Enigma2. There are many types of plugins available for Enigma2, such as media players, emulators, games, tools, etc.

          -
        7. What is a skin?
        8. -

          A skin is an add-on program that changes the appearance of Enigma2. There are many types of skins available for Enigma2, such as modern, classic, simple, colorful, etc.

          -
        9. What is a bouquet?
        10. -

          A bouquet is a collection of channels that are grouped together by genre, language, provider, etc. You can create and edit bouquets using DCC or other tools.

          -
        -

        -
        -
        \ No newline at end of file diff --git a/spaces/ramkamal2000/voice-conversion-ddp/utils.py b/spaces/ramkamal2000/voice-conversion-ddp/utils.py deleted file mode 100644 index aee4b0a4f5476a5b279311569d3d784b8c9b503f..0000000000000000000000000000000000000000 --- a/spaces/ramkamal2000/voice-conversion-ddp/utils.py +++ /dev/null @@ -1,306 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch -from torch.nn import functional as F -from commons import sequence_mask -from wavlm import WavLM, WavLMConfig - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def get_cmodel(rank): - checkpoint = torch.load('wavlm/WavLM-Large.pt') - cfg = WavLMConfig(checkpoint['cfg']) - cmodel = WavLM(cfg) - cmodel.load_state_dict(checkpoint['model']) - cmodel.eval() - return cmodel - - -def get_content(cmodel, y): - with torch.no_grad(): - c = cmodel.extract_features(y.squeeze(1))[0] - c = c.transpose(1, 2) - return c - - -def get_vocoder(rank): - with open("hifigan/config.json", "r") as f: - config = json.load(f) - config = hifigan.AttrDict(config) - vocoder = hifigan.Generator(config) - ckpt = torch.load("hifigan/generator_v1") - vocoder.load_state_dict(ckpt["generator"]) - vocoder.eval() - vocoder.remove_weight_norm() - vocoder.cuda(rank) - return vocoder - - -def transform(mel, height): # 68-92 - #r = np.random.random() - #rate = r * 0.3 + 0.85 # 0.85-1.15 - #height = int(mel.size(-2) * rate) - tgt = torchvision.transforms.functional.resize(mel, (height, mel.size(-1))) - if height >= mel.size(-2): - return tgt[:, :mel.size(-2), :] - else: - silence = tgt[:,-1:,:].repeat(1,mel.size(-2)-height,1) - silence += torch.randn_like(silence) / 10 - return torch.cat((tgt, silence), 1) - - -def stretch(mel, width): # 0.5-2 - return torchvision.transforms.functional.resize(mel, (mel.size(-2), width)) - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in 
scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, 
"r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/ranjangoel/GPT-PDF/gpt_reader/model_interface.py b/spaces/ranjangoel/GPT-PDF/gpt_reader/model_interface.py deleted file mode 100644 index f3c04a45373ad34216fb7616bd06de52c7008604..0000000000000000000000000000000000000000 --- a/spaces/ranjangoel/GPT-PDF/gpt_reader/model_interface.py +++ /dev/null @@ -1,32 +0,0 @@ -from typing import List -import openai - - -class ModelInterface(object): - - def __init__(self) -> None: - pass - - def send_msg(self, *args): - pass - - -class OpenAIModel(object): - - def __init__(self, api_key, model='gpt-3.5-turbo', temperature=0.2) -> None: - openai.api_key = api_key - self.model = model - self.temperature = temperature - - def send_msg(self, msg: List[dict], return_raw_text=True): - - response = openai.ChatCompletion.create( - model=self.model, - messages=msg, - temperature=self.temperature - ) - - if return_raw_text: - return response["choices"][0]["message"]["content"] - else: - return response diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/globals.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/globals.d.ts deleted file mode 100644 index 80fd4cf3aa8f1014d6468291eb3e6b1982a0d3b9..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/globals.d.ts +++ /dev/null @@ -1,300 +0,0 @@ -// Declare "static" methods in Error -interface ErrorConstructor { - /** Create .stack property on a target object */ - captureStackTrace(targetObject: object, constructorOpt?: Function): void; - - /** - * Optional override for formatting stack traces - * - * @see https://v8.dev/docs/stack-trace-api#customizing-stack-traces - */ - prepareStackTrace?: ((err: Error, stackTraces: 
NodeJS.CallSite[]) => any) | undefined; - - stackTraceLimit: number; -} - -/*-----------------------------------------------* - * * - * GLOBAL * - * * - ------------------------------------------------*/ - -// For backwards compability -interface NodeRequire extends NodeJS.Require { } -interface RequireResolve extends NodeJS.RequireResolve { } -interface NodeModule extends NodeJS.Module { } - -declare var process: NodeJS.Process; -declare var console: Console; - -declare var __filename: string; -declare var __dirname: string; - -declare var require: NodeRequire; -declare var module: NodeModule; - -// Same as module.exports -declare var exports: any; - -/** - * Only available if `--expose-gc` is passed to the process. - */ -declare var gc: undefined | (() => void); - -//#region borrowed -// from https://github.com/microsoft/TypeScript/blob/38da7c600c83e7b31193a62495239a0fe478cb67/lib/lib.webworker.d.ts#L633 until moved to separate lib -/** A controller object that allows you to abort one or more DOM requests as and when desired. */ -interface AbortController { - /** - * Returns the AbortSignal object associated with this object. - */ - - readonly signal: AbortSignal; - /** - * Invoking this method will set this object's AbortSignal's aborted flag and signal to any observers that the associated activity is to be aborted. - */ - abort(): void; -} - -/** A signal object that allows you to communicate with a DOM request (such as a Fetch) and abort it if required via an AbortController object. */ -interface AbortSignal extends EventTarget { - /** - * Returns true if this AbortSignal's AbortController has signaled to abort, and false otherwise. - */ - readonly aborted: boolean; -} - -declare var AbortController: typeof globalThis extends {onmessage: any; AbortController: infer T} - ? T - : { - prototype: AbortController; - new(): AbortController; - }; - -declare var AbortSignal: typeof globalThis extends {onmessage: any; AbortSignal: infer T} - ? T - : { - prototype: AbortSignal; - new(): AbortSignal; - abort(reason?: any): AbortSignal; - timeout(milliseconds: number): AbortSignal; - }; -//#endregion borrowed - -//#region ArrayLike.at() -interface RelativeIndexable { - /** - * Takes an integer value and returns the item at that index, - * allowing for positive and negative integers. - * Negative integers count back from the last item in the array. - */ - at(index: number): T | undefined; -} -interface String extends RelativeIndexable {} -interface Array extends RelativeIndexable {} -interface ReadonlyArray extends RelativeIndexable {} -interface Int8Array extends RelativeIndexable {} -interface Uint8Array extends RelativeIndexable {} -interface Uint8ClampedArray extends RelativeIndexable {} -interface Int16Array extends RelativeIndexable {} -interface Uint16Array extends RelativeIndexable {} -interface Int32Array extends RelativeIndexable {} -interface Uint32Array extends RelativeIndexable {} -interface Float32Array extends RelativeIndexable {} -interface Float64Array extends RelativeIndexable {} -interface BigInt64Array extends RelativeIndexable {} -interface BigUint64Array extends RelativeIndexable {} -//#endregion ArrayLike.at() end - -/** - * @since v17.0.0 - * - * Creates a deep clone of an object. 
- */ -declare function structuredClone( - value: T, - transfer?: { transfer: ReadonlyArray }, -): T; - -/*----------------------------------------------* -* * -* GLOBAL INTERFACES * -* * -*-----------------------------------------------*/ -declare namespace NodeJS { - interface CallSite { - /** - * Value of "this" - */ - getThis(): unknown; - - /** - * Type of "this" as a string. - * This is the name of the function stored in the constructor field of - * "this", if available. Otherwise the object's [[Class]] internal - * property. - */ - getTypeName(): string | null; - - /** - * Current function - */ - getFunction(): Function | undefined; - - /** - * Name of the current function, typically its name property. - * If a name property is not available an attempt will be made to try - * to infer a name from the function's context. - */ - getFunctionName(): string | null; - - /** - * Name of the property [of "this" or one of its prototypes] that holds - * the current function - */ - getMethodName(): string | null; - - /** - * Name of the script [if this function was defined in a script] - */ - getFileName(): string | null; - - /** - * Current line number [if this function was defined in a script] - */ - getLineNumber(): number | null; - - /** - * Current column number [if this function was defined in a script] - */ - getColumnNumber(): number | null; - - /** - * A call site object representing the location where eval was called - * [if this function was created using a call to eval] - */ - getEvalOrigin(): string | undefined; - - /** - * Is this a toplevel invocation, that is, is "this" the global object? - */ - isToplevel(): boolean; - - /** - * Does this call take place in code defined by a call to eval? - */ - isEval(): boolean; - - /** - * Is this call in native V8 code? - */ - isNative(): boolean; - - /** - * Is this a constructor call? 
- */ - isConstructor(): boolean; - } - - interface ErrnoException extends Error { - errno?: number | undefined; - code?: string | undefined; - path?: string | undefined; - syscall?: string | undefined; - } - - interface ReadableStream extends EventEmitter { - readable: boolean; - read(size?: number): string | Buffer; - setEncoding(encoding: BufferEncoding): this; - pause(): this; - resume(): this; - isPaused(): boolean; - pipe(destination: T, options?: { end?: boolean | undefined; }): T; - unpipe(destination?: WritableStream): this; - unshift(chunk: string | Uint8Array, encoding?: BufferEncoding): void; - wrap(oldStream: ReadableStream): this; - [Symbol.asyncIterator](): AsyncIterableIterator; - } - - interface WritableStream extends EventEmitter { - writable: boolean; - write(buffer: Uint8Array | string, cb?: (err?: Error | null) => void): boolean; - write(str: string, encoding?: BufferEncoding, cb?: (err?: Error | null) => void): boolean; - end(cb?: () => void): this; - end(data: string | Uint8Array, cb?: () => void): this; - end(str: string, encoding?: BufferEncoding, cb?: () => void): this; - } - - interface ReadWriteStream extends ReadableStream, WritableStream { } - - interface RefCounted { - ref(): this; - unref(): this; - } - - type TypedArray = - | Uint8Array - | Uint8ClampedArray - | Uint16Array - | Uint32Array - | Int8Array - | Int16Array - | Int32Array - | BigUint64Array - | BigInt64Array - | Float32Array - | Float64Array; - type ArrayBufferView = TypedArray | DataView; - - interface Require { - (id: string): any; - resolve: RequireResolve; - cache: Dict; - /** - * @deprecated - */ - extensions: RequireExtensions; - main: Module | undefined; - } - - interface RequireResolve { - (id: string, options?: { paths?: string[] | undefined; }): string; - paths(request: string): string[] | null; - } - - interface RequireExtensions extends Dict<(m: Module, filename: string) => any> { - '.js': (m: Module, filename: string) => any; - '.json': (m: Module, filename: string) => any; - '.node': (m: Module, filename: string) => any; - } - interface Module { - /** - * `true` if the module is running during the Node.js preload - */ - isPreloading: boolean; - exports: any; - require: Require; - id: string; - filename: string; - loaded: boolean; - /** @deprecated since v14.6.0 Please use `require.main` and `module.children` instead. */ - parent: Module | null | undefined; - children: Module[]; - /** - * @since v11.14.0 - * - * The directory name of the module. This is usually the same as the path.dirname() of the module.id. - */ - path: string; - paths: string[]; - } - - interface Dict { - [key: string]: T | undefined; - } - - interface ReadOnlyDict { - readonly [key: string]: T | undefined; - } -} diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/!!INSTALL!! Freemonopolyfullversiondownloadforpc.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/!!INSTALL!! Freemonopolyfullversiondownloadforpc.md deleted file mode 100644 index 7424ae8b5bed06a9d7f872ba2d44528f210d05ba..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/!!INSTALL!! Freemonopolyfullversiondownloadforpc.md +++ /dev/null @@ -1,132 +0,0 @@ -
        -

        Freemonopolyfullversiondownloadforpc: How to Enjoy the Classic Board Game on Your PC

        -

        Monopoly is one of the most popular and entertaining board games of all time. It is a game that involves buying, selling, and trading properties, building houses and hotels, and trying to bankrupt your opponents. Monopoly is a game that can be played by people of all ages and backgrounds, and it can provide hours of fun and excitement. But what if you want to play Monopoly on your PC? Is there a way to download Monopoly for free and play it on your computer? The answer is yes, there is. In this article, we will show you how to download freemonopolyfullversiondownloadforpc and enjoy the classic board game on your PC.

        -

        What is freemonopolyfullversiondownloadforpc?

        -

        Freemonopolyfullversiondownloadforpc is a digital version of the classic Monopoly board game that you can download and play on your PC. It is a game that features colorful graphics, animations, sound effects, and music that enhance the gameplay experience. Freemonopolyfullversiondownloadforpc also allows you to customize the game settings, such as the number of players, the difficulty level, the house rules, and the board theme. You can also play freemonopolyfullversiondownloadforpc online with your friends or other players from around the world.

        -

        freemonopolyfullversiondownloadforpc


        Download Filehttps://urlgoal.com/2uCKLE



        -

        How to download freemonopolyfullversiondownloadforpc?

        -

        If you want to download freemonopolyfullversiondownloadforpc, you can follow these steps:

        -
          -
        1. Go to this link: https://monopoly-plus.en.softonic.com/
        2. -
        3. Click on the Download button and wait for the file to be downloaded.
        4. -
        5. Open the downloaded file and follow the instructions to install the game on your PC.
        6. -
        7. Launch the game and enter your name and email address to create an account.
        8. -
        9. Enjoy playing freemonopolyfullversiondownloadforpc on your PC.
        10. -
        -

        What are the benefits of playing freemonopolyfullversiondownloadforpc?

        -

        Playing freemonopolyfullversiondownloadforpc has many benefits, such as:

        -
          -
        • You can play Monopoly anytime and anywhere without needing a physical board or pieces.
        • -
        • You can save time and money by not having to buy or rent a Monopoly board game.
        • -
        • You can play with different people from different locations and backgrounds online.
        • -
        • You can learn new skills and strategies by playing against different opponents.
        • -
        • You can have fun and relax by playing a game that you love.
        • -
        -

        What are the drawbacks of playing freemonopolyfullversiondownloadforpc?

        -

        Playing freemonopolyfullversiondownloadforpc also has some drawbacks, such as:

        -
          -
        • You need a stable internet connection and a compatible PC to play the game.
        • -
        • You may encounter some technical issues or errors while playing the game.
        • -
        • You may lose some of the social aspects and interactions that come with playing a board game in person.
        • -
        • You may become addicted or obsessed with playing the game.
        • -
        -

        Conclusion

        -

        In this article, we have shown you how to download freemonopolyfullversiondownloadforpc and enjoy the classic board game on your PC. We have also discussed some of the benefits and drawbacks of playing freemonopolyfullversiondownloadforpc. We hope you found this article helpful and informative. If you have any questions or comments, please feel free to leave them below. Thank you for reading!

        -

        How to play freemonopolyfullversiondownloadforpc?

        -

        Playing freemonopolyfullversiondownloadforpc is very easy and fun. You can play it alone or with your friends online. Here are some basic steps on how to play freemonopolyfullversiondownloadforpc:

        -

        -
          -
        1. Choose your character and enter the game.
        2. -
        3. Roll the dice and move your piece around the board.
        4. -
        5. Buy, sell, or trade properties with other players or the bank.
        6. -
        7. Build houses and hotels on your properties to increase their value.
        8. -
        9. Collect rent from other players who land on your properties.
        10. -
        11. Avoid landing on other players' properties or special squares, such as Go to Jail, Income Tax, or Chance.
        12. -
        13. Try to make other players go bankrupt by making them pay you more than they have.
        14. -
        15. The last player standing wins the game.
        16. -
        -

        What are the tips and tricks for playing freemonopolyfullversiondownloadforpc?

        -

        Playing freemonopolyfullversiondownloadforpc can be challenging and competitive. You need to have some skills and strategies to win the game. Here are some tips and tricks for playing freemonopolyfullversiondownloadforpc:

        -
          -
        • Plan ahead and think about your moves before you roll the dice.
        • -
        • Focus on buying properties that form a color group, as they will give you more rent and allow you to build houses and hotels.
        • -
        • Try to buy or trade for the railroads and utilities, as they will give you a steady income regardless of where you land.
        • -
        • Negotiate with other players and make deals that benefit you more than them.
        • -
        • Avoid spending too much money on properties that are not worth it or that you don't need.
        • -
        • Save some money for emergencies or unexpected expenses.
        • -
        • Use the house rules or the speed die mode to make the game more interesting or faster.
        • -
        -

        How to play freemonopolyfullversiondownloadforpc online?

        If you want to play freemonopolyfullversiondownloadforpc online with other players, you can follow these steps:

        1. Launch the game and log in to your account.
        2. Click on the Online mode and choose a game mode: Classic, Speed Die, or House Rules.
        3. Select a lobby or create your own and invite your friends or other players.
        4. Choose your character and wait for the game to start.
        5. Play freemonopolyfullversiondownloadforpc online and have fun.

        What are the differences between freemonopolyfullversiondownloadforpc and the original Monopoly board game?

        Freemonopolyfullversiondownloadforpc is a digital version of the original Monopoly board game that you can play on your PC. It has some differences from the original board game, such as:

        • It has a 3D city in the center of the board that changes according to the gameplay.
        • It has different board themes that you can choose from, such as Classic, Amusement Park, Haunted, or Rabbids.
        • It has different characters that you can choose from, such as a dog, a cat, a car, or a hat.
        • It has different sound effects and music that add to the atmosphere of the game.
        • It has different game modes that you can choose from, such as Classic, Speed Die, or House Rules.


        How to install freemonopolyfullversiondownloadforpc?

        After you download freemonopolyfullversiondownloadforpc, you need to install it on your PC. Here are the steps (a short scripted sketch of the same flow follows the list):

        1. Locate the downloaded file and double-click on it to open it.
        2. Extract the file to a folder of your choice.
        3. Open the folder and run the setup.exe file.
        4. Follow the instructions to complete the installation process.
        5. Create a shortcut of the game on your desktop or start menu.
        6. Run the game and enjoy playing freemonopolyfullversiondownloadforpc.
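        For readers who would rather script these steps, here is a minimal Python sketch of the same extract-and-run flow. It is purely illustrative: the archive name, the destination folder, and the setup.exe location are assumptions, and the actual download may ship as a different archive format or with a differently named installer.

```python
import subprocess
import zipfile
from pathlib import Path

# Assumed locations (hypothetical names); adjust them to wherever the game archive was saved.
archive = Path.home() / "Downloads" / "monopoly_full_version.zip"
install_dir = Path.home() / "Games" / "Monopoly"

# Step 2: extract the downloaded archive into a folder of your choice.
install_dir.mkdir(parents=True, exist_ok=True)
with zipfile.ZipFile(archive) as zf:
    zf.extractall(install_dir)

# Step 3: run the bundled installer and wait for it to finish.
subprocess.run([str(install_dir / "setup.exe")], check=True)
```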

        How to uninstall freemonopolyfullversiondownloadforpc?

        If you want to uninstall freemonopolyfullversiondownloadforpc, you can follow these steps (a small registry-lookup sketch follows the list):

        1. Go to Control Panel and select Programs and Features.
        2. Find freemonopolyfullversiondownloadforpc in the list and click on Uninstall.
        3. Follow the instructions to complete the uninstallation process.
        4. Delete any remaining files or folders related to freemonopolyfullversiondownloadforpc from your PC.
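        If you prefer to check programmatically which uninstaller Windows has registered before clicking through Control Panel, the sketch below scans the standard uninstall registry key. It is a hedged illustration: the "Monopoly" display name is an assumption, the game may register itself under another name (or not at all), and per-user or 32-bit installs live under HKEY_CURRENT_USER or the WOW6432Node key instead.

```python
import winreg

UNINSTALL_KEY = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall"

def find_uninstall_command(name_fragment: str):
    """Return the registered UninstallString of the first matching program, or None."""
    with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, UNINSTALL_KEY) as root:
        for i in range(winreg.QueryInfoKey(root)[0]):  # number of subkeys
            with winreg.OpenKey(root, winreg.EnumKey(root, i)) as entry:
                try:
                    name, _ = winreg.QueryValueEx(entry, "DisplayName")
                    command, _ = winreg.QueryValueEx(entry, "UninstallString")
                except OSError:
                    continue  # this entry does not expose both values
                if name_fragment.lower() in name.lower():
                    return command
    return None

print(find_uninstall_command("Monopoly"))  # assumed display name; prints None if nothing matches
```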

        What are the alternatives to freemonopolyfullversiondownloadforpc?

        If you are looking for alternatives to freemonopolyfullversiondownloadforpc, you can consider some of these options:

        • Rento: a game that lets you play a similar board game with an online multiplayer mode and custom rules.
        • Monopoly Plus: a 3D version of Monopoly with different board themes and game modes.
        • Monopoly Tycoon: a business simulation game based on Monopoly with real-time strategy elements.
        • Monopoly Here and Now: a modern version of Monopoly with updated properties and currencies.

        How to play freemonopolyfullversiondownloadforpc offline?

        If you want to play freemonopolyfullversiondownloadforpc offline, you can follow these steps:

        1. Launch the game and log in to your account.
        2. Click on the Offline mode and choose a game mode: Classic, Speed Die, or House Rules.
        3. Select a difficulty level: Easy, Medium, or Hard.
        4. Choose your character and the number of computer opponents.
        5. Play freemonopolyfullversiondownloadforpc offline and have fun.

        How to update freemonopolyfullversiondownloadforpc?

        If you want to update freemonopolyfullversiondownloadforpc, you can follow these steps:

        1. Go to this link: https://monopoly-plus.en.softonic.com/
        2. Click on the Update button and wait for the file to be downloaded.
        3. Open the downloaded file and follow the instructions to install the update on your PC.
        4. Launch the game and enjoy playing freemonopolyfullversiondownloadforpc with the latest features and improvements.

        How to get help for freemonopolyfullversiondownloadforpc?

        If you need help with freemonopolyfullversiondownloadforpc, you can turn to these sources:

        • The game manual: you can find the game manual in the game folder or online at https://monopoly-plus.en.softonic.com/manual
        • The game support: you can contact the game support team via email at support@monopoly-plus.com or via phone at +1-800-MONOPOLY
        • The game community: you can join the game community at https://monopoly-plus.en.softonic.com/community to find other players, tips, guides, and feedback.

        Conclusion


        In this article, we have shown you how to download and play freemonopolyfullversiondownloadforpc on your PC. We have also discussed some of the benefits and drawbacks of playing freemonopolyfullversiondownloadforpc. We have also provided you with some tips and tricks on how to play, install, update, and get help for freemonopolyfullversiondownloadforpc. We hope you found this article helpful and informative. If you have any questions or comments, please feel free to leave them below. Thank you for reading!

        \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (stand And Deliver 1988 Movie Free Download).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (stand And Deliver 1988 Movie Free Download).md deleted file mode 100644 index f974f21d07f1f9ad3e79eaa2cae86c58a6bd0425..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (stand And Deliver 1988 Movie Free Download).md +++ /dev/null @@ -1,22 +0,0 @@ -

        How to Watch Stand and Deliver (1988) Online for Free


        Stand and Deliver is a 1988 biographical drama film based on the true story of Jaime Escalante, a high school teacher who inspired his dropout-prone students to learn calculus. The film stars Edward James Olmos as Escalante, Lou Diamond Phillips as Angel, and Andy Garcia as Ramirez. The film was nominated for an Academy Award for Best Actor for Olmos and won six Independent Spirit Awards, including Best Feature and Best Director.


        If you want to watch Stand and Deliver online for free, you have several options. Here are some of the best ways to stream or download this inspiring movie:


        HD Online Player (stand and deliver 1988 movie free download)


        Download File: https://urlgoal.com/2uCKg2



        • Amazon Prime Video: If you have an Amazon Prime membership, you can watch Stand and Deliver for free on Prime Video. You can also rent or buy the movie on Amazon if you don't have a Prime account.
        • Apple TV: You can rent or buy Stand and Deliver on Apple TV, which is compatible with various devices such as iPhone, iPad, Mac, Apple TV, and smart TVs. You can also watch the movie on the Apple TV app.
        • Google Play Movies: You can rent or buy Stand and Deliver on Google Play Movies, which is accessible on Android devices, Chromecast, Roku, smart TVs, and web browsers. You can also watch the movie on the Google Play Movies app.
        • YouTube: You can rent or buy Stand and Deliver on YouTube, which is available on almost any device with an internet connection. You can also watch the movie on the YouTube app.
        • Vudu: You can rent or buy Stand and Deliver on Vudu, which is a digital video store that offers a large selection of movies and TV shows. You can watch the movie on various devices such as smart TVs, Roku, Chromecast, PlayStation, Xbox, and web browsers.
        • Microsoft Store: You can rent or buy Stand and Deliver on Microsoft Store, which is a digital marketplace that sells movies, games, apps, and more. You can watch the movie on Windows devices, Xbox consoles, and web browsers.
        • Redbox: You can rent or buy Stand and Deliver on Redbox, which is a video rental service that operates kiosks and online streaming platforms. You can watch the movie on various devices such as smart TVs, Roku, Apple TV, Chromecast, PlayStation, Xbox, and web browsers.
        • DIRECTV: If you have a DIRECTV subscription, you can watch Stand and Deliver for free on DIRECTV. You can also rent or buy the movie on DIRECTV if you don't have a subscription.
        • AMC on Demand: If you have an AMC+ subscription, you can watch Stand and Deliver for free on AMC on Demand. You can also rent or buy the movie on AMC on Demand if you don't have a subscription.
        • M4uHD: If you want to watch Stand and Deliver for free without signing up or downloading anything, you can try M4uHD[^1^], which is a free online streaming website that offers HD quality movies. However, be careful of pop-up ads and malware that may harm your device.
        • YTS: If you want to download Stand and Deliver for free in HD quality with subtitles, you can try YTS[^2^], which is a torrent website that provides high-quality movies. However, be aware of the legal risks and potential viruses that may come with torrenting.
        • Internet Archive: If you want to download Stand and Deliver for free in SD quality without subtitles, you can try Internet Archive[^4^], which is a digital library that offers public domain movies. However, be mindful of the low quality and limited availability of some movies.

        Stand and Deliver is a classic movie that showcases the power of education and perseverance. Whether you want to stream or download it online for free or pay for it, there are many options to choose from. Enjoy watching this inspiring film!

        \ No newline at end of file diff --git a/spaces/reimari/rvc-aa99/infer_pack/modules.py b/spaces/reimari/rvc-aa99/infer_pack/modules.py deleted file mode 100644 index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000 --- a/spaces/reimari/rvc-aa99/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = 
self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - 
padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * 
x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/apps/__init__.py b/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/apps/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/rinong/StyleGAN-NADA/util.py b/spaces/rinong/StyleGAN-NADA/util.py deleted file mode 100644 index 083b56170f5feb72eccfebd38a53aed70db32064..0000000000000000000000000000000000000000 --- a/spaces/rinong/StyleGAN-NADA/util.py +++ /dev/null @@ -1,136 +0,0 @@ -from matplotlib import pyplot as plt -import torch -import torch.nn.functional as F -import os -import dlib -from PIL import Image -import numpy as np -import scipy -import scipy.ndimage -import torchvision.transforms as transforms - -def display_image(image, size=None, mode='nearest', unnorm=False, title=''): - # image is [3,h,w] or [1,3,h,w] tensor [0,1] - if not isinstance(image, torch.Tensor): - image = transforms.ToTensor()(image).unsqueeze(0) - if image.is_cuda: - image = image.cpu() - if size is not None and image.size(-1) != size: - image = F.interpolate(image, size=(size,size), mode=mode) - if image.dim() == 4: - image = image[0] - image = image.permute(1, 2, 0).detach().numpy() - plt.figure() - plt.title(title) - plt.axis('off') - plt.imshow(image) - -def get_landmark(filepath, predictor): - """get landmark with dlib - :return: np.array shape=(68, 2) - """ - detector = dlib.get_frontal_face_detector() - - img = dlib.load_rgb_image(filepath) - dets 
= detector(img, 1) - assert len(dets) > 0, "Face not detected, try another face image" - - for k, d in enumerate(dets): - shape = predictor(img, d) - - t = list(shape.parts()) - a = [] - for tt in t: - a.append([tt.x, tt.y]) - lm = np.array(a) - return lm - -def align_face(filepath, predictor, output_size=256, transform_size=1024, enable_padding=True): - - """ - :param filepath: str - :return: PIL Image - """ - lm = get_landmark(filepath, predictor) - - lm_chin = lm[0: 17] # left-right - lm_eyebrow_left = lm[17: 22] # left-right - lm_eyebrow_right = lm[22: 27] # left-right - lm_nose = lm[27: 31] # top-down - lm_nostrils = lm[31: 36] # top-down - lm_eye_left = lm[36: 42] # left-clockwise - lm_eye_right = lm[42: 48] # left-clockwise - lm_mouth_outer = lm[48: 60] # left-clockwise - lm_mouth_inner = lm[60: 68] # left-clockwise - - # Calculate auxiliary vectors. - eye_left = np.mean(lm_eye_left, axis=0) - eye_right = np.mean(lm_eye_right, axis=0) - eye_avg = (eye_left + eye_right) * 0.5 - eye_to_eye = eye_right - eye_left - mouth_left = lm_mouth_outer[0] - mouth_right = lm_mouth_outer[6] - mouth_avg = (mouth_left + mouth_right) * 0.5 - eye_to_mouth = mouth_avg - eye_avg - - # Choose oriented crop rectangle. - x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] - x /= np.hypot(*x) - x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) - y = np.flipud(x) * [-1, 1] - c = eye_avg + eye_to_mouth * 0.1 - quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) - qsize = np.hypot(*x) * 2 - - # read image - img = Image.open(filepath) - - transform_size = output_size - enable_padding = True - - # Shrink. - shrink = int(np.floor(qsize / output_size * 0.5)) - if shrink > 1: - rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink))) - img = img.resize(rsize, Image.ANTIALIAS) - quad /= shrink - qsize /= shrink - - # Crop. - border = max(int(np.rint(qsize * 0.1)), 3) - crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), - min(crop[3] + border, img.size[1])) - if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: - img = img.crop(crop) - quad -= crop[0:2] - - # Pad. - pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), - max(pad[3] - img.size[1] + border, 0)) - if enable_padding and max(pad) > border - 4: - pad = np.maximum(pad, int(np.rint(qsize * 0.3))) - img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') - h, w, _ = img.shape - y, x, _ = np.ogrid[:h, :w, :1] - mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]), - 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3])) - blur = qsize * 0.02 - img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) - img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0) - img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB') - quad += pad[:2] - - # Transform. 
- img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR) - if output_size < transform_size: - img = img.resize((output_size, output_size), Image.ANTIALIAS) - - # Return aligned image. - return img - -def strip_path_extension(path): - return os.path.splitext(path)[0] \ No newline at end of file diff --git a/spaces/ritikjain51/pdf-question-answering/app.py b/spaces/ritikjain51/pdf-question-answering/app.py deleted file mode 100644 index aaa408778efb8132671e16145a52852aebee4b71..0000000000000000000000000000000000000000 --- a/spaces/ritikjain51/pdf-question-answering/app.py +++ /dev/null @@ -1,131 +0,0 @@ -import logging -import os - -import gradio as gr -from langchain import OpenAI, HuggingFaceHub -from langchain.chains import ConversationalRetrievalChain -from langchain.document_loaders import PyPDFLoader -from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings -from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores import Chroma - -documents = [] -qa = None - - -def get_file(file): - try: - global documents - data = PyPDFLoader(file.name) - documents = data.load_and_split(CharacterTextSplitter(chunk_size=2000, chunk_overlap=0)) - except Exception as e: - logging.error(e, exc_info=True) - return "Failed to upload." - return "File Uploaded." - - -def model_configuration(model_name, api_key=None, hug_model=None, hug_token=None, temperature=0, max_length=512): - try: - embeddings, llm = None, None - if not documents: - return gr.update(value="Please upload correct PDF!", visible=True) - global qa - if model_name == "OpenAI": - os.environ["OPENAI_API_KEY"] = api_key or os.getenv("OPENAI_API_KEY") - embeddings = OpenAIEmbeddings() - llm = OpenAI(temperature=temperature, max_tokens=max_length) - elif model_name == "HuggingFace": - os.environ["HUGGINGFACEHUB_API_TOKEN"] = hug_token or os.getenv("HUGGINGFACE_API_KEY") - embeddings = HuggingFaceEmbeddings(model_name=hug_model, model_kwargs={'device': 'cpu'}) - llm = HuggingFaceHub(repo_id=hug_model, model_kwargs={"temperature": temperature, "max_length": max_length}) - - if embeddings: - db = Chroma.from_documents(documents, embeddings) - retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 2}) - qa = ConversationalRetrievalChain.from_llm(llm, chain_type="map_reduce", retriever=retriever, - return_source_documents=True, verbose=False) - except Exception as e: - logging.error(e, exc_info=True) - return gr.update(value="Error occurred!", visible=True) - return gr.update(value="Model Built", visible=True) - - -def response(msg, chat_history): - global qa - result = qa({"question": msg, "chat_history": map(tuple, chat_history)}) - final_resp = result.get("answer", "").strip() - chat_history.append((msg, final_resp)) - docs = result.get("source_documents") - return "", chat_history - - -with gr.Blocks() as demo: - with gr.Tab("PDF Ingestion") as pdf_input: - file = None - with gr.Column() as r1: - file = gr.File(file_types=[".pdf"]) - op_txt = gr.Label(value="", label="") - fl_btn = gr.Button("Upload & Ingest 🚀") - fl_btn.click(get_file, inputs=[file], outputs=op_txt) - - with gr.Tab("Select Model") as model: - model_name = gr.Dropdown( - ["NA", "OpenAI", "HuggingFace"], - show_label=True, - label="Model Name", - multiselect=False, - value="NA" - ) - with gr.Column(visible=False) as openai_config: - api_key = gr.Textbox(value="", label="OPENAI API KEY", placeholder="sk-...", visible=True, interactive=True, - type="password") - - with 
gr.Column(visible=False) as huggy_config: - hug_model = gr.Dropdown(["google/flan-t5-xl"], - value="google/flan-t5-xl", multiselect=False) - hug_token = gr.Textbox(value="", placeholder="hf_...", interactive=True, type="password") - - with gr.Accordion("Advance Settings", open=False, visible=False) as advance_settings: - temperature = gr.Slider(0, 1, label="Temperature") - max_length = gr.components.Number(value=512, label="Max Token Length") - - - def show_configuration(model_name): - match model_name: - case "OpenAI": - return { - openai_config: gr.update(visible=True), - huggy_config: gr.update(visible=False), - advance_settings: gr.update(visible=True) - } - case "HuggingFace": - return { - openai_config: gr.update(visible=False), - huggy_config: gr.update(visible=True), - advance_settings: gr.update(visible=True) - } - case _: - return { - openai_config: gr.update(visible=False), - huggy_config: gr.update(visible=False), - advance_settings: gr.update(visible=False) - } - - - model_name.change(show_configuration, inputs=[model_name], - outputs=[openai_config, huggy_config, advance_settings]) - model_updated = gr.Label("", show_label=False, visible=True) - btn = gr.Button("Configure Model 🤖") - btn.click(model_configuration, inputs=[model_name, api_key, hug_model, hug_token, temperature, max_length], - outputs=model_updated) - - with gr.Tab("Q&A") as qna: - with gr.Column() as r: - chatbot = gr.Chatbot(show_label=True) - msg = gr.Textbox(placeholder="Ask Something") - clear = gr.Button("Clear") - msg.submit(response, [msg, chatbot], [msg, chatbot]) - clear.click(lambda: None, None, chatbot, queue=False) - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/robin0307/MMOCR/configs/_base_/recog_pipelines/crnn_tps_pipeline.py b/spaces/robin0307/MMOCR/configs/_base_/recog_pipelines/crnn_tps_pipeline.py deleted file mode 100644 index 3a2eea55a739206c11ae876ba82e9c2f6ea1ff6d..0000000000000000000000000000000000000000 --- a/spaces/robin0307/MMOCR/configs/_base_/recog_pipelines/crnn_tps_pipeline.py +++ /dev/null @@ -1,37 +0,0 @@ -img_norm_cfg = dict(mean=[0.5], std=[0.5]) - -train_pipeline = [ - dict(type='LoadImageFromFile', color_type='grayscale'), - dict( - type='ResizeOCR', - height=32, - min_width=100, - max_width=100, - keep_aspect_ratio=False), - dict(type='ToTensorOCR'), - dict(type='NormalizeOCR', **img_norm_cfg), - dict( - type='Collect', - keys=['img'], - meta_keys=[ - 'filename', 'ori_shape', 'resize_shape', 'text', 'valid_ratio' - ]), -] -test_pipeline = [ - dict(type='LoadImageFromFile', color_type='grayscale'), - dict( - type='ResizeOCR', - height=32, - min_width=32, - max_width=100, - keep_aspect_ratio=False), - dict(type='ToTensorOCR'), - dict(type='NormalizeOCR', **img_norm_cfg), - dict( - type='Collect', - keys=['img'], - meta_keys=[ - 'filename', 'ori_shape', 'resize_shape', 'valid_ratio', - 'img_norm_cfg', 'ori_filename', 'img_shape' - ]), -] diff --git a/spaces/roborovski/Diffusle/app.py b/spaces/roborovski/Diffusle/app.py deleted file mode 100644 index 54318a9cce7bebb8b1a765ef4c4c61f6a03fd067..0000000000000000000000000000000000000000 --- a/spaces/roborovski/Diffusle/app.py +++ /dev/null @@ -1,282 +0,0 @@ -# -*- coding: utf-8 -*- -"""diffusle prompt testing - stable diffusion -Automatically generated by Colaboratory. 
-Original file is located at - https://colab.research.google.com/drive/1EiL1cvF84u4f9_U3mw_1xofHIJG3r-YR -""" - -from asyncore import file_dispatcher -from collections import defaultdict -from email.policy import default -from functools import lru_cache -from transformers import TFGPT2LMHeadModel, GPT2Tokenizer -import random -import pandas -import urllib.request -from sentence_transformers import SentenceTransformer, util -import os -import discord -import random -from discord.ext import commands, tasks -import io -import math -from diffusers import DiffusionPipeline, LDMTextToImagePipeline -import torch -import configparser -import pickle -from torch import autocast -from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler -from transformers import CLIPTextModel, CLIPTokenizer -from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler -from diffusers import LMSDiscreteScheduler -from PIL import Image -import torch -import random -import time -import gradio as gr -import itertools -from tqdm import tqdm -from PIL import Image - -torch_device = 'cuda' if torch.cuda.is_available() else 'cpu' - -access_token = os.getenv("HF_TOKEN") - -tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", use_auth_token=access_token) -text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", use_auth_token=access_token) -vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae", use_auth_token=access_token) -unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet", use_auth_token=access_token) - -scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000) - -vae = vae.to(torch_device) -text_encoder = text_encoder.to(torch_device) -unet = unet.to(torch_device) - - -prompts = [] -with open('prompts.csv', 'r') as prompt_file: - prompts = [line.rstrip('\n') for line in prompt_file] - -print('prompts', prompts) - -t = 1000 * time.time() -torch.manual_seed(t) -random.seed(t) - -inf_range = [1,2,3,5,7,10] -guidance_range = [5, 5, 5, 7.5, 7.5, 7.5] -generator = torch.manual_seed(t) -height = 512 -width = 512 -guidance_scale = 7.5 - - -def image_grid(imgs, rows=3, cols=3): - assert len(imgs) == rows*cols - - w, h = imgs[0].size - grid = Image.new('RGB', size=(cols*w, rows*h)) - grid_w, grid_h = grid.size - - for i, img in enumerate(imgs): - grid.paste(img, box=(i%cols*w, i//cols*h)) - return grid - -def latents_to_pil(latents, vae): - # bath of latents -> list of images - latents = (1 / 0.18215) * latents - with torch.no_grad(): - image = vae.decode(latents)[0] - image = (image / 2 + 0.5).clamp(0, 1) - image = image.detach().cpu().permute(0, 2, 3, 1).numpy() - images = (image * 255).round().astype("uint8") - pil_images = [Image.fromarray(image) for image in images] - return pil_images - -use_gradio = True - -batch_size = 1 - -def get_prompt_embedding(prompt): - text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") - text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0] - max_length = text_input.input_ids.shape[-1] - uncond_input = tokenizer( - [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt" - ) - # generate latent embeddings - with torch.no_grad(): - uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - - 
return uncond_embeddings, text_embeddings - -inference_steps = 15 - -def setup(): - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000) - scheduler.set_timesteps(inference_steps) - generator = torch.manual_seed(torch.random.initial_seed()) - latents = torch.randn( - (batch_size, unet.in_channels, height // 8, width // 8), - generator=generator, - ) - latents = latents.to(torch_device) - latents = latents * scheduler.sigmas[0] - return scheduler, latents - - -w2v_model = None - -def levenshtein( s1, s2): - s1_len = len(s1) - s2_len = len(s2) - d = [[0 for x in range(s1_len + 1)] for y in range(s2_len + 1)] - - for i in range(1, s2_len + 1): - d[i][0] = i - - for j in range(1, s1_len + 1): - d[0][j] = j - - for j in range(1, s1_len + 1): - for i in range(1, s2_len + 1): - if s1[j - 1] is s2[i - 1]: - delta = 0 - else: - delta = 1 - - d[i][j] = min(d[i - 1][j] + 1, - d[i][j - 1] + 1, - d[i - 1][j - 1] + delta) - - return d[s2_len][s1_len] - -if not w2v_model: - print("loading word2vec model") - w2v_model = SentenceTransformer('all-MiniLM-L6-v2') - - -def score_guess(guess, prompt): - global w2v_model - #Compute embedding for both lists - prompt_emb = w2v_model.encode([prompt], convert_to_tensor=True) - guess_emb = w2v_model.encode([guess], convert_to_tensor=True) - - #Compute cosine-similarities - cosine_scores = util.cos_sim(prompt_emb, guess_emb) - word2vec_score = cosine_scores[0][0] - levenshtein_score = 1 - (levenshtein(prompt, guess) / 100) - w2v_bias = 0.5 - biases = (w2v_bias, 1-w2v_bias) - total = (word2vec_score * biases[0] + levenshtein_score * biases[1]) / 2 - total = float(word2vec_score) - return total - -def init_state(): - step = 0 - prompt = random.choice(prompts) - uncond_embeddings, text_embeddings = get_prompt_embedding(prompt) - scheduler, latents = setup() - steps = scheduler.timesteps - return { 'step': step, 'prompt': prompt, 'latents': latents, 'scheduler': scheduler, 'text_embeddings': text_embeddings } - -default_guess_text = "Press Submit to generate the first image." 
-win_threshold = 0.65 - -with gr.Blocks() as interface: - - with gr.Row() as row: - with gr.Column(): - guess_input = gr.Textbox(label="Your Guess", value=default_guess_text, max_lines=1) - submit_btn = gr.Button("Submit") - with gr.Column(): - img = gr.Image(label="Image to guess") - score_output = gr.Text(label="Output") - - def guess(guess, state): - - if guess == default_guess_text: - guess_input.value = "" - - with autocast("cuda"): - if not state: - print('init state') - step = 0 - prompt = random.choice(prompts) - uncond_embeddings, text_embeddings = get_prompt_embedding(prompt) - scheduler, latents = setup() - steps = scheduler.timesteps - else: - print('increment state') - step = state['step'] - prompt = state['prompt'] - scheduler = state['scheduler'] - text_embeddings = state['text_embeddings'] - latents = state['latents'] - steps = scheduler.timesteps - - final_step = step == len(scheduler.timesteps) - 1 - if step > len(steps) - 1 or final_step: - step = inference_steps - 1 - - latent_model_input = torch.cat([latents] * 2) - sigma = scheduler.sigmas[step] - latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5) - print(step, steps[step], prompt) - - - with torch.no_grad(): - noise_pred = unet(latent_model_input, steps[step], encoder_hidden_states=text_embeddings)["sample"] - - # perform guidance - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - latents_x0 = latents - sigma * noise_pred - - # compute the previous noisy sample x_t -> x_t-1 - latents = scheduler.step(noise_pred, step, latents)["prev_sample"] - denoised_img = latents_to_pil(latents_x0, vae)[0] - # display(denoised_img) - - score = score_guess(guess, prompt) - score = round(float(score), 2) - fmt_score = (score / win_threshold) * 100 - fmt_score = round(float(fmt_score), 2) - - result_text = f"Score: {str(fmt_score)}% Guesses: {step}" - - if guess == "": - score = 0 - result_text = "No guess submitted, no score" - - if guess == default_guess_text: - score = 0 - result_text = "Game on!" - - if len(guess) > 250: - score = 0 - result_text = "Guess is too long!" - - if score > win_threshold: - if final_step: - result_text = "Ran out of guesses!" - else: - result_text = f"You win! You are poggers! the prompt was {prompt}, you guesssed it in {step} guesses. To play again press Submit" - scheduler, latents = setup() - step = 0 - prompt = random.choice(prompts) - uncond_embeddings, text_embeddings = get_prompt_embedding(prompt) - else: - step += 1 - - print(prompt, step) - - return denoised_img, result_text, { 'step': step, 'prompt': prompt, 'latents': latents, 'scheduler': scheduler, 'text_embeddings': text_embeddings } - - state = gr.State() - submit_btn.click(guess, inputs=[guess_input, state], outputs=[img, score_output, state]) - -interface.launch() \ No newline at end of file diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/sparse_roi_head.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/sparse_roi_head.py deleted file mode 100644 index 2613469e3a7cf397f19c04b24c43ab50b0c75551..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/sparse_roi_head.py +++ /dev/null @@ -1,424 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import numpy as np -import torch - -from mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh -from mmdet.core.bbox.samplers import PseudoSampler -from ..builder import HEADS -from .cascade_roi_head import CascadeRoIHead - - -@HEADS.register_module() -class SparseRoIHead(CascadeRoIHead): - r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with - Learnable Proposals `_ - and `Instances as Queries `_ - - Args: - num_stages (int): Number of stage whole iterative process. - Defaults to 6. - stage_loss_weights (Tuple[float]): The loss - weight of each stage. By default all stages have - the same weight 1. - bbox_roi_extractor (dict): Config of box roi extractor. - mask_roi_extractor (dict): Config of mask roi extractor. - bbox_head (dict): Config of box head. - mask_head (dict): Config of mask head. - train_cfg (dict, optional): Configuration information in train stage. - Defaults to None. - test_cfg (dict, optional): Configuration information in test stage. - Defaults to None. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - """ - - def __init__(self, - num_stages=6, - stage_loss_weights=(1, 1, 1, 1, 1, 1), - proposal_feature_channel=256, - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_roi_extractor=None, - bbox_head=dict( - type='DIIHead', - num_classes=80, - num_fcs=2, - num_heads=8, - num_cls_fcs=1, - num_reg_fcs=3, - feedforward_channels=2048, - hidden_channels=256, - dropout=0.0, - roi_feat_size=7, - ffn_act_cfg=dict(type='ReLU', inplace=True)), - mask_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - assert bbox_roi_extractor is not None - assert bbox_head is not None - assert len(stage_loss_weights) == num_stages - self.num_stages = num_stages - self.stage_loss_weights = stage_loss_weights - self.proposal_feature_channel = proposal_feature_channel - super(SparseRoIHead, self).__init__( - num_stages, - stage_loss_weights, - bbox_roi_extractor=bbox_roi_extractor, - mask_roi_extractor=mask_roi_extractor, - bbox_head=bbox_head, - mask_head=mask_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) - # train_cfg would be None when run the test.py - if train_cfg is not None: - for stage in range(num_stages): - assert isinstance(self.bbox_sampler[stage], PseudoSampler), \ - 'Sparse R-CNN and QueryInst only support `PseudoSampler`' - - def _bbox_forward(self, stage, x, rois, object_feats, img_metas): - """Box head forward function used in both training and testing. Returns - all regression, classification results and a intermediate feature. - - Args: - stage (int): The index of current stage in - iterative process. - x (List[Tensor]): List of FPN features - rois (Tensor): Rois in total batch. With shape (num_proposal, 5). - the last dimension 5 represents (img_index, x1, y1, x2, y2). - object_feats (Tensor): The object feature extracted from - the previous stage. - img_metas (dict): meta information of images. - - Returns: - dict[str, Tensor]: a dictionary of bbox head outputs, - Containing the following results: - - - cls_score (Tensor): The score of each class, has - shape (batch_size, num_proposals, num_classes) - when use focal loss or - (batch_size, num_proposals, num_classes+1) - otherwise. 
- - decode_bbox_pred (Tensor): The regression results - with shape (batch_size, num_proposal, 4). - The last dimension 4 represents - [tl_x, tl_y, br_x, br_y]. - - object_feats (Tensor): The object feature extracted - from current stage - - detach_cls_score_list (list[Tensor]): The detached - classification results, length is batch_size, and - each tensor has shape (num_proposal, num_classes). - - detach_proposal_list (list[tensor]): The detached - regression results, length is batch_size, and each - tensor has shape (num_proposal, 4). The last - dimension 4 represents [tl_x, tl_y, br_x, br_y]. - """ - num_imgs = len(img_metas) - bbox_roi_extractor = self.bbox_roi_extractor[stage] - bbox_head = self.bbox_head[stage] - bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], - rois) - cls_score, bbox_pred, object_feats, attn_feats = bbox_head( - bbox_feats, object_feats) - proposal_list = self.bbox_head[stage].refine_bboxes( - rois, - rois.new_zeros(len(rois)), # dummy arg - bbox_pred.view(-1, bbox_pred.size(-1)), - [rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)], - img_metas) - bbox_results = dict( - cls_score=cls_score, - decode_bbox_pred=torch.cat(proposal_list), - object_feats=object_feats, - attn_feats=attn_feats, - # detach then use it in label assign - detach_cls_score_list=[ - cls_score[i].detach() for i in range(num_imgs) - ], - detach_proposal_list=[item.detach() for item in proposal_list]) - - return bbox_results - - def _mask_forward(self, stage, x, rois, attn_feats): - """Mask head forward function used in both training and testing.""" - mask_roi_extractor = self.mask_roi_extractor[stage] - mask_head = self.mask_head[stage] - mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], - rois) - # do not support caffe_c4 model anymore - mask_pred = mask_head(mask_feats, attn_feats) - - mask_results = dict(mask_pred=mask_pred) - return mask_results - - def _mask_forward_train(self, stage, x, attn_feats, sampling_results, - gt_masks, rcnn_train_cfg): - """Run forward function and calculate loss for mask head in - training.""" - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - attn_feats = torch.cat([ - feats[res.pos_inds] - for (feats, res) in zip(attn_feats, sampling_results) - ]) - mask_results = self._mask_forward(stage, x, pos_rois, attn_feats) - - mask_targets = self.mask_head[stage].get_targets( - sampling_results, gt_masks, rcnn_train_cfg) - - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - - loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], - mask_targets, pos_labels) - mask_results.update(loss_mask) - return mask_results - - def forward_train(self, - x, - proposal_boxes, - proposal_features, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - imgs_whwh=None, - gt_masks=None): - """Forward function in training stage. - - Args: - x (list[Tensor]): list of multi-level img features. - proposals (Tensor): Decoded proposal bboxes, has shape - (batch_size, num_proposals, 4) - proposal_features (Tensor): Expanded proposal - features, has shape - (batch_size, num_proposals, proposal_feature_channel) - img_metas (list[dict]): list of image info dict where - each dict has: 'img_shape', 'scale_factor', 'flip', - and may also contain 'filename', 'ori_shape', - 'pad_shape', and 'img_norm_cfg'. For details on the - values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. 
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - imgs_whwh (Tensor): Tensor with shape (batch_size, 4), - the dimension means - [img_width,img_height, img_width, img_height]. - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components of all stage. - """ - - num_imgs = len(img_metas) - num_proposals = proposal_boxes.size(1) - imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1) - all_stage_bbox_results = [] - proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))] - object_feats = proposal_features - all_stage_loss = {} - for stage in range(self.num_stages): - rois = bbox2roi(proposal_list) - bbox_results = self._bbox_forward(stage, x, rois, object_feats, - img_metas) - all_stage_bbox_results.append(bbox_results) - if gt_bboxes_ignore is None: - # TODO support ignore - gt_bboxes_ignore = [None for _ in range(num_imgs)] - sampling_results = [] - cls_pred_list = bbox_results['detach_cls_score_list'] - proposal_list = bbox_results['detach_proposal_list'] - for i in range(num_imgs): - normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] / - imgs_whwh[i]) - assign_result = self.bbox_assigner[stage].assign( - normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i], - gt_labels[i], img_metas[i]) - sampling_result = self.bbox_sampler[stage].sample( - assign_result, proposal_list[i], gt_bboxes[i]) - sampling_results.append(sampling_result) - bbox_targets = self.bbox_head[stage].get_targets( - sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage], - True) - cls_score = bbox_results['cls_score'] - decode_bbox_pred = bbox_results['decode_bbox_pred'] - - single_stage_loss = self.bbox_head[stage].loss( - cls_score.view(-1, cls_score.size(-1)), - decode_bbox_pred.view(-1, 4), - *bbox_targets, - imgs_whwh=imgs_whwh) - - if self.with_mask: - mask_results = self._mask_forward_train( - stage, x, bbox_results['attn_feats'], sampling_results, - gt_masks, self.train_cfg[stage]) - single_stage_loss['loss_mask'] = mask_results['loss_mask'] - - for key, value in single_stage_loss.items(): - all_stage_loss[f'stage{stage}_{key}'] = value * \ - self.stage_loss_weights[stage] - object_feats = bbox_results['object_feats'] - - return all_stage_loss - - def simple_test(self, - x, - proposal_boxes, - proposal_features, - img_metas, - imgs_whwh, - rescale=False): - """Test without augmentation. - - Args: - x (list[Tensor]): list of multi-level img features. - proposal_boxes (Tensor): Decoded proposal bboxes, has shape - (batch_size, num_proposals, 4) - proposal_features (Tensor): Expanded proposal - features, has shape - (batch_size, num_proposals, proposal_feature_channel) - img_metas (dict): meta information of images. - imgs_whwh (Tensor): Tensor with shape (batch_size, 4), - the dimension means - [img_width,img_height, img_width, img_height]. - rescale (bool): If True, return boxes in original image - space. Defaults to False. - - Returns: - list[list[np.ndarray]] or list[tuple]: When no mask branch, - it is bbox results of each image and classes with type - `list[list[np.ndarray]]`. The outer list - corresponds to each image. The inner list - corresponds to each class. 
When the model has a mask branch, - it is a list[tuple] that contains bbox results and mask results. - The outer list corresponds to each image, and first element - of tuple is bbox results, second element is mask results. - """ - assert self.with_bbox, 'Bbox head must be implemented.' - # Decode initial proposals - num_imgs = len(img_metas) - proposal_list = [proposal_boxes[i] for i in range(num_imgs)] - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - object_feats = proposal_features - if all([proposal.shape[0] == 0 for proposal in proposal_list]): - # There is no proposal in the whole batch - bbox_results = [[ - np.zeros((0, 5), dtype=np.float32) - for i in range(self.bbox_head[-1].num_classes) - ]] * num_imgs - return bbox_results - - for stage in range(self.num_stages): - rois = bbox2roi(proposal_list) - bbox_results = self._bbox_forward(stage, x, rois, object_feats, - img_metas) - object_feats = bbox_results['object_feats'] - cls_score = bbox_results['cls_score'] - proposal_list = bbox_results['detach_proposal_list'] - - if self.with_mask: - rois = bbox2roi(proposal_list) - mask_results = self._mask_forward(stage, x, rois, - bbox_results['attn_feats']) - mask_results['mask_pred'] = mask_results['mask_pred'].reshape( - num_imgs, -1, *mask_results['mask_pred'].size()[1:]) - - num_classes = self.bbox_head[-1].num_classes - det_bboxes = [] - det_labels = [] - - if self.bbox_head[-1].loss_cls.use_sigmoid: - cls_score = cls_score.sigmoid() - else: - cls_score = cls_score.softmax(-1)[..., :-1] - - for img_id in range(num_imgs): - cls_score_per_img = cls_score[img_id] - scores_per_img, topk_indices = cls_score_per_img.flatten( - 0, 1).topk( - self.test_cfg.max_per_img, sorted=False) - labels_per_img = topk_indices % num_classes - bbox_pred_per_img = proposal_list[img_id][topk_indices // - num_classes] - if rescale: - scale_factor = img_metas[img_id]['scale_factor'] - bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor) - det_bboxes.append( - torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1)) - det_labels.append(labels_per_img) - - bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], num_classes) - for i in range(num_imgs) - ] - - if self.with_mask: - if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i][:, :4] - for i in range(len(det_bboxes)) - ] - segm_results = [] - mask_pred = mask_results['mask_pred'] - for img_id in range(num_imgs): - mask_pred_per_img = mask_pred[img_id].flatten(0, - 1)[topk_indices] - mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat( - 1, num_classes, 1, 1) - segm_result = self.mask_head[-1].get_seg_masks( - mask_pred_per_img, _bboxes[img_id], det_labels[img_id], - self.test_cfg, ori_shapes[img_id], scale_factors[img_id], - rescale) - segm_results.append(segm_result) - - if self.with_mask: - results = list(zip(bbox_results, segm_results)) - else: - results = bbox_results - - return results - - def aug_test(self, features, proposal_list, img_metas, rescale=False): - raise NotImplementedError( - 'Sparse R-CNN and QueryInst does not support `aug_test`') - - def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas): - """Dummy forward function when do the flops computing.""" - all_stage_bbox_results = [] - proposal_list = 
[proposal_boxes[i] for i in range(len(proposal_boxes))] - object_feats = proposal_features - if self.with_bbox: - for stage in range(self.num_stages): - rois = bbox2roi(proposal_list) - bbox_results = self._bbox_forward(stage, x, rois, object_feats, - img_metas) - - all_stage_bbox_results.append((bbox_results, )) - proposal_list = bbox_results['detach_proposal_list'] - object_feats = bbox_results['object_feats'] - - if self.with_mask: - rois = bbox2roi(proposal_list) - mask_results = self._mask_forward( - stage, x, rois, bbox_results['attn_feats']) - all_stage_bbox_results[-1] += (mask_results, ) - return all_stage_bbox_results diff --git a/spaces/rorallitri/biomedical-language-models/logs/Bonnie And McCloud 720p.md b/spaces/rorallitri/biomedical-language-models/logs/Bonnie And McCloud 720p.md deleted file mode 100644 index 22e8f70e027786a5aeda58d187a25f4517f54b06..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Bonnie And McCloud 720p.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Bonnie and McCloud 720p


        DOWNLOADhttps://tinurll.com/2uznPx



        -
        - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/Catia V5r21 Portable Torrent.md b/spaces/rorallitri/biomedical-language-models/logs/Catia V5r21 Portable Torrent.md deleted file mode 100644 index 52637fc7c6ea8e4af9d9e39f4b3b35afd211d621..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Catia V5r21 Portable Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Catia V5r21 Portable Torrent


        Download ⚙⚙⚙ https://tinurll.com/2uzmJm



        - -... copies it to the nodelock directory of the portable Configure the DS License Server 1. ... Apr 25, 2019 · Click Start Server. install DS License Server to run CATIA Post by ... the called DSLS-SSQ) allows to run CATIA V5R21, CATIA V5-6R2012 (aka. ... The desktop Solidworks 2019 Torrent Solidsquad apps will attempt to ... 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/Easeus Data Recovery Full Version Keygen.md b/spaces/rorallitri/biomedical-language-models/logs/Easeus Data Recovery Full Version Keygen.md deleted file mode 100644 index 1610a515ff242746727146284caf3d95e2dabc3b..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Easeus Data Recovery Full Version Keygen.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Easeus data recovery full version keygen


        Download File ✸✸✸ https://tinurll.com/2uzm9o



        - -Jump to EaseUS Data Recovery Wizard 13 Keygen + Crack {Latest ... — EaseUS Data Recovery Wizard 13 Keygen + Crack Latest Version. Overall, ... 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/F1 2010 Pc 101 Crack [Extra Quality] 22.md b/spaces/rorallitri/biomedical-language-models/logs/F1 2010 Pc 101 Crack [Extra Quality] 22.md deleted file mode 100644 index 073361e2a1f6129f21bf02f600ca4e7da61149a2..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/F1 2010 Pc 101 Crack [Extra Quality] 22.md +++ /dev/null @@ -1,6 +0,0 @@ -

        F1 2010 Pc 101 Crack 22


        Download File ○○○ https://tinurll.com/2uznNq



        - -This is the official Formula 1 Twitch channel. Here you can watch live race streams and F1 highlights from around the globe. It has 16 official broadcasters and over 1300 unofficial. 24 Oct 2010 THE BRITISH GP - A 'nightmare' race for Red Bull, their least successful of the season and the first race they could not clinch the drivers' championship. The four-time world champions broke down in the first corner, leading to one of the most bizarre and controversial races in Grand Prix history. 14 Nov 2009 Formula 1 is like a nervous stomach, it only runs when the crisis is over. The race was one of the more boring ones of the year. It was full of red flags and the long red flag period forced the race to drag on for over three hours. No team could build up a lead, Red Bull appeared to be hopeless. We saw Vettel die like a fly. Vettel then ran off the track, but could not be seen until the race had finished. This happens to your drivers. Button and Raikkonen's race ended like this. Race ends for lack of overtaking. Stewart's win was far more interesting. The race ended due to fuel. As this was the last race at Bahrain, the teams had to make their final run on their final piece of fuel before returning to the hotel for the night. McLaren blew up. Oh boy, the McLaren disaster. In the last race at Malaysia, the McLaren team ran out of fuel. Will we see more of this in 2010? As the cars pulled into the pits, it all blew up. There was a huge explosion at the back of the pack. One of the McLaren's engine blew up as the cars pulled into the pits. A huge explosion was heard. Fans were stunned as a massive fireball erupted in the middle of the track. It felt like an earthquake. How on earth are they going to fix this? The McLaren race car caught fire in the pits during the race and eventually exploded. McLaren ended the race with just two cars in the garage for repairs, as the other car was out on track. The whole race could have ended in disaster for McLaren. The team started the race on a high with a great start and a great position in the race. After the first few corners, however, they started to fall back, and the race could have turned out very differently. This is the end of their 2009 season. But maybe they will be back for 2011. He finished in third place, behind Webber and Hamilton. But it could 4fefd39f24
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/Humshakal Movie In Hindi Download 720p Hd.md b/spaces/rorallitri/biomedical-language-models/logs/Humshakal Movie In Hindi Download 720p Hd.md deleted file mode 100644 index dd90939b4e7aeb7e146d3cf88419d5ddf5833d31..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Humshakal Movie In Hindi Download 720p Hd.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Humshakal Movie In Hindi Download 720p Hd


        DOWNLOAD ✒ ✒ ✒ https://tinurll.com/2uzozE



        - - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/Lost Grimoires Collection Torrent Download _VERIFIED_.md b/spaces/rorallitri/biomedical-language-models/logs/Lost Grimoires Collection Torrent Download _VERIFIED_.md deleted file mode 100644 index b1d89382c5f0aa2de55ab583e4d1d1b280a29f08..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Lost Grimoires Collection Torrent Download _VERIFIED_.md +++ /dev/null @@ -1,10 +0,0 @@ - -

        The library’s history goes back over 300 years. The building dates from 1760, when it was designed as the town’s first library. It survived two fires and a great flood in 1927; today it is the only remaining royal library in the kingdom. The library has a rich collection of books on all of these subjects and will happily assist you with finding information on any of them.

        -

        A collection is a group of items of the same type or quality. Thus, a collection of iron bits is called an iron collection. In the case of a library collection, each book is of the same type and is collected as such. In the case of a collection of iron bits, each bit is of the same quality, and is collected as such. A collection of iron and steel bits would therefore be a collection of iron and steel items, the former being a collection of the latter.

        -

        Lost Grimoires Collection Torrent Download


        Download File ✺✺✺ https://tinurll.com/2uzmuM



        -

        Lost Grimoires: Stolen Kingdom key features • hidden object puzzle game • unearth fascinating insights about the lost kingdom and its ancient secrets • become an alchemist to solve the mysteries of the lost kingdom • unlock a series of puzzles to discover and unearth the kingdom's past

        -

        my name is qu'ran, i am a mujahid, a warrior of allah, one day i will come to you and i will lead you to the true path, to the path that goes straight to heaven. you are different now from the old one that lived as a man, every man, every woman should follow the path of the god qu'ran. you have a destiny that you must fulfill. this is the meaning of the'lost book."

        -

        Lost Grimoires: Stolen Kingdom is a puzzle adventure game in which you play the role of a young alchemist who wishes to uncover the secrets of the kingdom's destiny. Packed with colourful fantasy artwork and hidden objects, becoming a master of alchemy is the aim of this adventure puzzle game.

        -

        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/ruslanmv/Clone-Your-Voice/vocoder/vocoder_dataset.py b/spaces/ruslanmv/Clone-Your-Voice/vocoder/vocoder_dataset.py deleted file mode 100644 index 9eae1b5f20117feef0a06e264a99b3c0c6143bac..0000000000000000000000000000000000000000 --- a/spaces/ruslanmv/Clone-Your-Voice/vocoder/vocoder_dataset.py +++ /dev/null @@ -1,84 +0,0 @@ -from torch.utils.data import Dataset -from pathlib import Path -from vocoder import audio -import vocoder.hparams as hp -import numpy as np -import torch - - -class VocoderDataset(Dataset): - def __init__(self, metadata_fpath: Path, mel_dir: Path, wav_dir: Path): - print("Using inputs from:\n\t%s\n\t%s\n\t%s" % (metadata_fpath, mel_dir, wav_dir)) - - with metadata_fpath.open("r") as metadata_file: - metadata = [line.split("|") for line in metadata_file] - - gta_fnames = [x[1] for x in metadata if int(x[4])] - gta_fpaths = [mel_dir.joinpath(fname) for fname in gta_fnames] - wav_fnames = [x[0] for x in metadata if int(x[4])] - wav_fpaths = [wav_dir.joinpath(fname) for fname in wav_fnames] - self.samples_fpaths = list(zip(gta_fpaths, wav_fpaths)) - - print("Found %d samples" % len(self.samples_fpaths)) - - def __getitem__(self, index): - mel_path, wav_path = self.samples_fpaths[index] - - # Load the mel spectrogram and adjust its range to [-1, 1] - mel = np.load(mel_path).T.astype(np.float32) / hp.mel_max_abs_value - - # Load the wav - wav = np.load(wav_path) - if hp.apply_preemphasis: - wav = audio.pre_emphasis(wav) - wav = np.clip(wav, -1, 1) - - # Fix for missing padding # TODO: settle on whether this is any useful - r_pad = (len(wav) // hp.hop_length + 1) * hp.hop_length - len(wav) - wav = np.pad(wav, (0, r_pad), mode='constant') - assert len(wav) >= mel.shape[1] * hp.hop_length - wav = wav[:mel.shape[1] * hp.hop_length] - assert len(wav) % hp.hop_length == 0 - - # Quantize the wav - if hp.voc_mode == 'RAW': - if hp.mu_law: - quant = audio.encode_mu_law(wav, mu=2 ** hp.bits) - else: - quant = audio.float_2_label(wav, bits=hp.bits) - elif hp.voc_mode == 'MOL': - quant = audio.float_2_label(wav, bits=16) - - return mel.astype(np.float32), quant.astype(np.int64) - - def __len__(self): - return len(self.samples_fpaths) - - -def collate_vocoder(batch): - mel_win = hp.voc_seq_len // hp.hop_length + 2 * hp.voc_pad - max_offsets = [x[0].shape[-1] -2 - (mel_win + 2 * hp.voc_pad) for x in batch] - mel_offsets = [np.random.randint(0, offset) for offset in max_offsets] - sig_offsets = [(offset + hp.voc_pad) * hp.hop_length for offset in mel_offsets] - - mels = [x[0][:, mel_offsets[i]:mel_offsets[i] + mel_win] for i, x in enumerate(batch)] - - labels = [x[1][sig_offsets[i]:sig_offsets[i] + hp.voc_seq_len + 1] for i, x in enumerate(batch)] - - mels = np.stack(mels).astype(np.float32) - labels = np.stack(labels).astype(np.int64) - - mels = torch.tensor(mels) - labels = torch.tensor(labels).long() - - x = labels[:, :hp.voc_seq_len] - y = labels[:, 1:] - - bits = 16 if hp.voc_mode == 'MOL' else hp.bits - - x = audio.label_2_float(x.float(), bits) - - if hp.voc_mode == 'MOL' : - y = audio.label_2_float(y.float(), bits) - - return x, y, mels \ No newline at end of file diff --git a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/calc_inception.py b/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/calc_inception.py deleted file mode 100644 index 5daa531475c377a73ffa256bdf84bb662e144215..0000000000000000000000000000000000000000 --- 
a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/calc_inception.py +++ /dev/null @@ -1,116 +0,0 @@ -import argparse -import pickle -import os - -import torch -from torch import nn -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torchvision import transforms -from torchvision.models import inception_v3, Inception3 -import numpy as np -from tqdm import tqdm - -from inception import InceptionV3 -from dataset import MultiResolutionDataset - - -class Inception3Feature(Inception3): - def forward(self, x): - if x.shape[2] != 299 or x.shape[3] != 299: - x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True) - - x = self.Conv2d_1a_3x3(x) # 299 x 299 x 3 - x = self.Conv2d_2a_3x3(x) # 149 x 149 x 32 - x = self.Conv2d_2b_3x3(x) # 147 x 147 x 32 - x = F.max_pool2d(x, kernel_size=3, stride=2) # 147 x 147 x 64 - - x = self.Conv2d_3b_1x1(x) # 73 x 73 x 64 - x = self.Conv2d_4a_3x3(x) # 73 x 73 x 80 - x = F.max_pool2d(x, kernel_size=3, stride=2) # 71 x 71 x 192 - - x = self.Mixed_5b(x) # 35 x 35 x 192 - x = self.Mixed_5c(x) # 35 x 35 x 256 - x = self.Mixed_5d(x) # 35 x 35 x 288 - - x = self.Mixed_6a(x) # 35 x 35 x 288 - x = self.Mixed_6b(x) # 17 x 17 x 768 - x = self.Mixed_6c(x) # 17 x 17 x 768 - x = self.Mixed_6d(x) # 17 x 17 x 768 - x = self.Mixed_6e(x) # 17 x 17 x 768 - - x = self.Mixed_7a(x) # 17 x 17 x 768 - x = self.Mixed_7b(x) # 8 x 8 x 1280 - x = self.Mixed_7c(x) # 8 x 8 x 2048 - - x = F.avg_pool2d(x, kernel_size=8) # 8 x 8 x 2048 - - return x.view(x.shape[0], x.shape[1]) # 1 x 1 x 2048 - - -def load_patched_inception_v3(): - # inception = inception_v3(pretrained=True) - # inception_feat = Inception3Feature() - # inception_feat.load_state_dict(inception.state_dict()) - inception_feat = InceptionV3([3], normalize_input=False) - - return inception_feat - - -@torch.no_grad() -def extract_features(loader, inception, device): - pbar = tqdm(loader) - - feature_list = [] - - for img in pbar: - img = img.to(device) - feature = inception(img)[0].view(img.shape[0], -1) - feature_list.append(feature.to('cpu')) - - features = torch.cat(feature_list, 0) - - return features - - -if __name__ == '__main__': - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - parser = argparse.ArgumentParser( - description='Calculate Inception v3 features for datasets' - ) - parser.add_argument('--size', type=int, default=256) - parser.add_argument('--batch', default=64, type=int, help='batch size') - parser.add_argument('--n_sample', type=int, default=50000) - parser.add_argument('--flip', action='store_true') - parser.add_argument('path', metavar='PATH', help='path to datset lmdb file') - - args = parser.parse_args() - - inception = load_patched_inception_v3() - inception = nn.DataParallel(inception).eval().to(device) - - transform = transforms.Compose( - [ - transforms.RandomHorizontalFlip(p=0.5 if args.flip else 0), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - - dset = MultiResolutionDataset(args.path, transform=transform, resolution=args.size) - loader = DataLoader(dset, batch_size=args.batch, num_workers=4) - - features = extract_features(loader, inception, device).numpy() - - features = features[: args.n_sample] - - print(f'extracted {features.shape[0]} features') - - mean = np.mean(features, 0) - cov = np.cov(features, rowvar=False) - - name = os.path.splitext(os.path.basename(args.path))[0] - - with open(f'inception_{name}.pkl', 'wb') as f: - pickle.dump({'mean': mean, 'cov': cov, 
'size': args.size, 'path': args.path}, f) diff --git a/spaces/samavi/openai-clip-vit-base-patch32/README.md b/spaces/samavi/openai-clip-vit-base-patch32/README.md deleted file mode 100644 index 0be692b29d99a2ee1217eae8c0b8ce265976ef23..0000000000000000000000000000000000000000 --- a/spaces/samavi/openai-clip-vit-base-patch32/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Openai Clip Vit Base Patch32 -emoji: 🏃 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sccstandardteam/ChuanhuChatGPT/assets/custom.js b/spaces/sccstandardteam/ChuanhuChatGPT/assets/custom.js deleted file mode 100644 index ae5a76b5e791be8b107126889519e37d89fc80f0..0000000000000000000000000000000000000000 --- a/spaces/sccstandardteam/ChuanhuChatGPT/assets/custom.js +++ /dev/null @@ -1,607 +0,0 @@ - -// custom javascript here - -const MAX_HISTORY_LENGTH = 32; - -var key_down_history = []; -var currentIndex = -1; -var user_input_ta; - -var gradioContainer = null; -var user_input_ta = null; -var user_input_tb = null; -var userInfoDiv = null; -var appTitleDiv = null; -var chatbot = null; -var chatbotWrap = null; -var apSwitch = null; -var empty_botton = null; -var messageBotDivs = null; -// var renderLatex = null; -var loginUserForm = null; -var logginUser = null; - -var userLogged = false; -var usernameGotten = false; -var shouldRenderLatex = false; -var historyLoaded = false; - -var ga = document.getElementsByTagName("gradio-app"); -var targetNode = ga[0]; -var isInIframe = (window.self !== window.top); -var language = navigator.language.slice(0,2); - -var forView_i18n = { - 'zh': "仅供查看", - 'en': "For viewing only", - 'ja': "閲覧専用", - 'fr': "Pour consultation seulement", - 'es': "Solo para visualización", -}; - -// gradio 页面加载好了么??? 我能动你的元素了么?? -function gradioLoaded(mutations) { - for (var i = 0; i < mutations.length; i++) { - if (mutations[i].addedNodes.length) { - loginUserForm = document.querySelector(".gradio-container > .main > .wrap > .panel > .form") - gradioContainer = document.querySelector(".gradio-container"); - user_input_tb = document.getElementById('user_input_tb'); - userInfoDiv = document.getElementById("user_info"); - appTitleDiv = document.getElementById("app_title"); - chatbot = document.querySelector('#chuanhu_chatbot'); - chatbotWrap = document.querySelector('#chuanhu_chatbot > .wrap'); - apSwitch = document.querySelector('.apSwitch input[type="checkbox"]'); - // renderLatex = document.querySelector("#render_latex_checkbox > label > input"); - empty_botton = document.getElementById("empty_btn") - - if (loginUserForm) { - localStorage.setItem("userLogged", true); - userLogged = true; - } - - if (gradioContainer && apSwitch) { // gradioCainter 加载出来了没? - adjustDarkMode(); - } - if (user_input_tb) { // user_input_tb 加载出来了没? - selectHistory(); - } - if (userInfoDiv && appTitleDiv) { // userInfoDiv 和 appTitleDiv 加载出来了没? - if (!usernameGotten) { - getUserInfo(); - } - setTimeout(showOrHideUserInfo(), 2000); - } - if (chatbot) { // chatbot 加载出来了没? - setChatbotHeight(); - } - if (chatbotWrap) { - if (!historyLoaded) { - loadHistoryHtml(); - } - setChatbotScroll(); - } - // if (renderLatex) { // renderLatex 加载出来了没? 
- // shouldRenderLatex = renderLatex.checked; - // updateMathJax(); - // } - if (empty_botton) { - emptyHistory(); - } - } - } -} - -function webLocale() { - console.log("webLocale", language); - if (forView_i18n.hasOwnProperty(language)) { - var forView = forView_i18n[language]; - var forViewStyle = document.createElement('style'); - forViewStyle.innerHTML = '.wrap>.history-message>:last-child::after { content: "' + forView + '"!important; }'; - document.head.appendChild(forViewStyle); - // console.log("added forViewStyle", forView); - } -} - -function selectHistory() { - user_input_ta = user_input_tb.querySelector("textarea"); - if (user_input_ta) { - observer.disconnect(); // 停止监听 - // 在 textarea 上监听 keydown 事件 - user_input_ta.addEventListener("keydown", function (event) { - var value = user_input_ta.value.trim(); - // 判断按下的是否为方向键 - if (event.code === 'ArrowUp' || event.code === 'ArrowDown') { - // 如果按下的是方向键,且输入框中有内容,且历史记录中没有该内容,则不执行操作 - if (value && key_down_history.indexOf(value) === -1) - return; - // 对于需要响应的动作,阻止默认行为。 - event.preventDefault(); - var length = key_down_history.length; - if (length === 0) { - currentIndex = -1; // 如果历史记录为空,直接将当前选中的记录重置 - return; - } - if (currentIndex === -1) { - currentIndex = length; - } - if (event.code === 'ArrowUp' && currentIndex > 0) { - currentIndex--; - user_input_ta.value = key_down_history[currentIndex]; - } else if (event.code === 'ArrowDown' && currentIndex < length - 1) { - currentIndex++; - user_input_ta.value = key_down_history[currentIndex]; - } - user_input_ta.selectionStart = user_input_ta.value.length; - user_input_ta.selectionEnd = user_input_ta.value.length; - const input_event = new InputEvent("input", { bubbles: true, cancelable: true }); - user_input_ta.dispatchEvent(input_event); - } else if (event.code === "Enter") { - if (value) { - currentIndex = -1; - if (key_down_history.indexOf(value) === -1) { - key_down_history.push(value); - if (key_down_history.length > MAX_HISTORY_LENGTH) { - key_down_history.shift(); - } - } - } - } - }); - } -} - -var username = null; -function getUserInfo() { - if (usernameGotten) { - return; - } - userLogged = localStorage.getItem('userLogged'); - if (userLogged) { - username = userInfoDiv.innerText; - if (username) { - if (username.includes("getting user info…")) { - setTimeout(getUserInfo, 500); - return; - } else if (username === " ") { - localStorage.removeItem("username"); - localStorage.removeItem("userLogged") - userLogged = false; - usernameGotten = true; - return; - } else { - username = username.match(/User:\s*(.*)/)[1] || username; - localStorage.setItem("username", username); - usernameGotten = true; - clearHistoryHtml(); - } - } - } -} - -function toggleUserInfoVisibility(shouldHide) { - if (userInfoDiv) { - if (shouldHide) { - userInfoDiv.classList.add("hideK"); - } else { - userInfoDiv.classList.remove("hideK"); - } - } -} -function showOrHideUserInfo() { - var sendBtn = document.getElementById("submit_btn"); - - // Bind mouse/touch events to show/hide user info - appTitleDiv.addEventListener("mouseenter", function () { - toggleUserInfoVisibility(false); - }); - userInfoDiv.addEventListener("mouseenter", function () { - toggleUserInfoVisibility(false); - }); - sendBtn.addEventListener("mouseenter", function () { - toggleUserInfoVisibility(false); - }); - - appTitleDiv.addEventListener("mouseleave", function () { - toggleUserInfoVisibility(true); - }); - userInfoDiv.addEventListener("mouseleave", function () { - toggleUserInfoVisibility(true); - }); - 
sendBtn.addEventListener("mouseleave", function () { - toggleUserInfoVisibility(true); - }); - - appTitleDiv.ontouchstart = function () { - toggleUserInfoVisibility(false); - }; - userInfoDiv.ontouchstart = function () { - toggleUserInfoVisibility(false); - }; - sendBtn.ontouchstart = function () { - toggleUserInfoVisibility(false); - }; - - appTitleDiv.ontouchend = function () { - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 3000); - }; - userInfoDiv.ontouchend = function () { - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 3000); - }; - sendBtn.ontouchend = function () { - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 3000); // Delay 1 second to hide user info - }; - - // Hide user info after 2 second - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 2000); -} - -function toggleDarkMode(isEnabled) { - if (isEnabled) { - gradioContainer.classList.add("dark"); - document.body.style.setProperty("background-color", "var(--neutral-950)", "important"); - } else { - gradioContainer.classList.remove("dark"); - document.body.style.backgroundColor = ""; - } -} -function adjustDarkMode() { - const darkModeQuery = window.matchMedia("(prefers-color-scheme: dark)"); - - // 根据当前颜色模式设置初始状态 - apSwitch.checked = darkModeQuery.matches; - toggleDarkMode(darkModeQuery.matches); - // 监听颜色模式变化 - darkModeQuery.addEventListener("change", (e) => { - apSwitch.checked = e.matches; - toggleDarkMode(e.matches); - }); - // apSwitch = document.querySelector('.apSwitch input[type="checkbox"]'); - apSwitch.addEventListener("change", (e) => { - toggleDarkMode(e.target.checked); - }); -} - -function setChatbotHeight() { - const screenWidth = window.innerWidth; - const statusDisplay = document.querySelector('#status_display'); - const statusDisplayHeight = statusDisplay ? 
statusDisplay.offsetHeight : 0; - const wrap = chatbot.querySelector('.wrap'); - const vh = window.innerHeight * 0.01; - document.documentElement.style.setProperty('--vh', `${vh}px`); - if (isInIframe) { - chatbot.style.height = `700px`; - wrap.style.maxHeight = `calc(700px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))` - } else { - if (screenWidth <= 320) { - chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`; - wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`; - } else if (screenWidth <= 499) { - chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`; - wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`; - } else { - chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`; - wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`; - } - } -} -function setChatbotScroll() { - var scrollHeight = chatbotWrap.scrollHeight; - chatbotWrap.scrollTo(0,scrollHeight) -} -var rangeInputs = null; -var numberInputs = null; -function setSlider() { - rangeInputs = document.querySelectorAll('input[type="range"]'); - numberInputs = document.querySelectorAll('input[type="number"]') - setSliderRange(); - rangeInputs.forEach(rangeInput => { - rangeInput.addEventListener('input', setSliderRange); - }); - numberInputs.forEach(numberInput => { - numberInput.addEventListener('input', setSliderRange); - }) -} -function setSliderRange() { - var range = document.querySelectorAll('input[type="range"]'); - range.forEach(range => { - range.style.backgroundSize = (range.value - range.min) / (range.max - range.min) * 100 + '% 100%'; - }); -} - -function addChuanhuButton(botElement) { - var rawMessage = null; - var mdMessage = null; - rawMessage = botElement.querySelector('.raw-message'); - mdMessage = botElement.querySelector('.md-message'); - if (!rawMessage) { - var buttons = botElement.querySelectorAll('button.chuanhu-btn'); - for (var i = 0; i < buttons.length; i++) { - buttons[i].parentNode.removeChild(buttons[i]); - } - return; - } - var copyButton = null; - var toggleButton = null; - copyButton = botElement.querySelector('button.copy-bot-btn'); - toggleButton = botElement.querySelector('button.toggle-md-btn'); - if (copyButton) copyButton.remove(); - if (toggleButton) toggleButton.remove(); - - // Copy bot button - var copyButton = document.createElement('button'); - copyButton.classList.add('chuanhu-btn'); - copyButton.classList.add('copy-bot-btn'); - copyButton.setAttribute('aria-label', 'Copy'); - copyButton.innerHTML = copyIcon; - copyButton.addEventListener('click', () => { - const textToCopy = rawMessage.innerText; - navigator.clipboard - .writeText(textToCopy) - .then(() => { - copyButton.innerHTML = copiedIcon; - setTimeout(() => { - copyButton.innerHTML = copyIcon; - }, 1500); - }) - .catch(() => { - console.error("copy failed"); - }); - }); - botElement.appendChild(copyButton); - - // Toggle button - var toggleButton = document.createElement('button'); - toggleButton.classList.add('chuanhu-btn'); - toggleButton.classList.add('toggle-md-btn'); - toggleButton.setAttribute('aria-label', 'Toggle'); - var renderMarkdown = mdMessage.classList.contains('hideM'); - toggleButton.innerHTML = renderMarkdown ? 
mdIcon : rawIcon; - toggleButton.addEventListener('click', () => { - renderMarkdown = mdMessage.classList.contains('hideM'); - if (renderMarkdown){ - renderMarkdownText(botElement); - toggleButton.innerHTML=rawIcon; - } else { - removeMarkdownText(botElement); - toggleButton.innerHTML=mdIcon; - } - }); - botElement.insertBefore(toggleButton, copyButton); -} - -function addCopyCodeButton(pre) { - var code = null; - var firstChild = null; - code = pre.querySelector('code'); - if (!code) return; - firstChild = code.querySelector('div'); - if (!firstChild) return; - var oldCopyButton = null; - oldCopyButton = code.querySelector('button.copy-code-btn'); - // if (oldCopyButton) oldCopyButton.remove(); - if (oldCopyButton) return; // 没太有用,新生成的对话中始终会被pre覆盖,导致按钮消失,这段代码不启用…… - var codeButton = document.createElement('button'); - codeButton.classList.add('copy-code-btn'); - codeButton.textContent = '\uD83D\uDCCE'; - - code.insertBefore(codeButton, firstChild); - codeButton.addEventListener('click', function () { - var range = document.createRange(); - range.selectNodeContents(code); - range.setStartBefore(firstChild); - navigator.clipboard - .writeText(range.toString()) - .then(() => { - codeButton.textContent = '\u2714'; - setTimeout(function () { - codeButton.textContent = '\uD83D\uDCCE'; - }, 2000); - }) - .catch(e => { - console.error(e); - codeButton.textContent = '\u2716'; - }); - }); -} - -function renderMarkdownText(message) { - var mdDiv = message.querySelector('.md-message'); - if (mdDiv) mdDiv.classList.remove('hideM'); - var rawDiv = message.querySelector('.raw-message'); - if (rawDiv) rawDiv.classList.add('hideM'); -} -function removeMarkdownText(message) { - var rawDiv = message.querySelector('.raw-message'); - if (rawDiv) rawDiv.classList.remove('hideM'); - var mdDiv = message.querySelector('.md-message'); - if (mdDiv) mdDiv.classList.add('hideM'); -} - -var rendertime = 0; // for debugging -var mathjaxUpdated = false; - -function renderMathJax() { - messageBotDivs = document.querySelectorAll('.message.bot .md-message'); - for (var i = 0; i < messageBotDivs.length; i++) { - var mathJaxSpan = messageBotDivs[i].querySelector('.MathJax_Preview'); - if (!mathJaxSpan && shouldRenderLatex && !mathjaxUpdated) { - MathJax.Hub.Queue(["Typeset", MathJax.Hub, messageBotDivs[i]]); - rendertime +=1; // for debugging - // console.log("renderingMathJax", i) - } - } - mathjaxUpdated = true; - // console.log("MathJax Rendered") -} - -function removeMathjax() { - // var jax = MathJax.Hub.getAllJax(); - // for (var i = 0; i < jax.length; i++) { - // // MathJax.typesetClear(jax[i]); - // jax[i].Text(newmath) - // jax[i].Reprocess() - // } - // 我真的不会了啊啊啊,mathjax并没有提供转换为原先文本的办法。 - mathjaxUpdated = true; - // console.log("MathJax removed!"); -} - -function updateMathJax() { - // renderLatex.addEventListener("change", function() { - // shouldRenderLatex = renderLatex.checked; - // if (!mathjaxUpdated) { - // if (shouldRenderLatex) { - // renderMathJax(); - // } else { - // console.log("MathJax Disabled") - // removeMathjax(); - // } - // } else { - // if (!shouldRenderLatex) { - // mathjaxUpdated = false; // reset - // } - // } - // }); - if (shouldRenderLatex && !mathjaxUpdated) { - renderMathJax(); - } - mathjaxUpdated = false; -} - -let timeoutId; -let isThrottled = false; -var mmutation -// 监听所有元素中 bot message 的变化,用来查找需要渲染的mathjax, 并为 bot 消息添加复制按钮。 -var mObserver = new MutationObserver(function (mutationsList) { - for (mmutation of mutationsList) { - if (mmutation.type === 'childList') { - for (var node 
of mmutation.addedNodes) { - if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') { - if (shouldRenderLatex) { - renderMathJax(); - mathjaxUpdated = false; - } - saveHistoryHtml(); - document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton); - document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot pre').forEach(addCopyCodeButton); - } - if (node.tagName === 'INPUT' && node.getAttribute('type') === 'range') { - setSlider(); - } - } - for (var node of mmutation.removedNodes) { - if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') { - if (shouldRenderLatex) { - renderMathJax(); - mathjaxUpdated = false; - } - saveHistoryHtml(); - document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton); - document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot pre').forEach(addCopyCodeButton); - } - } - } else if (mmutation.type === 'attributes') { - if (mmutation.target.nodeType === 1 && mmutation.target.classList.contains('message') && mmutation.target.getAttribute('data-testid') === 'bot') { - document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot pre').forEach(addCopyCodeButton); // 目前写的是有点问题的,会导致加button次数过多,但是bot对话内容生成时又是不断覆盖pre的…… - if (isThrottled) break; // 为了防止重复不断疯狂渲染,加上等待_(:з」∠)_ - isThrottled = true; - clearTimeout(timeoutId); - timeoutId = setTimeout(() => { - isThrottled = false; - if (shouldRenderLatex) { - renderMathJax(); - mathjaxUpdated = false; - } - document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton); - saveHistoryHtml(); - }, 500); - } - } - } -}); -mObserver.observe(document.documentElement, { attributes: true, childList: true, subtree: true }); - -var loadhistorytime = 0; // for debugging -function saveHistoryHtml() { - var historyHtml = document.querySelector('#chuanhu_chatbot > .wrap'); - localStorage.setItem('chatHistory', historyHtml.innerHTML); - // console.log("History Saved") - historyLoaded = false; -} -function loadHistoryHtml() { - var historyHtml = localStorage.getItem('chatHistory'); - if (!historyHtml) { - historyLoaded = true; - return; // no history, do nothing - } - userLogged = localStorage.getItem('userLogged'); - if (userLogged){ - historyLoaded = true; - return; // logged in, do nothing - } - if (!historyLoaded) { - var tempDiv = document.createElement('div'); - tempDiv.innerHTML = historyHtml; - var buttons = tempDiv.querySelectorAll('button.chuanhu-btn'); - for (var i = 0; i < buttons.length; i++) { - buttons[i].parentNode.removeChild(buttons[i]); - } - var fakeHistory = document.createElement('div'); - fakeHistory.classList.add('history-message'); - fakeHistory.innerHTML = tempDiv.innerHTML; - webLocale(); - chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild); - // var fakeHistory = document.createElement('div'); - // fakeHistory.classList.add('history-message'); - // fakeHistory.innerHTML = historyHtml; - // chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild); - historyLoaded = true; - console.log("History Loaded"); - loadhistorytime += 1; // for debugging - } else { - historyLoaded = false; - } -} -function clearHistoryHtml() { - localStorage.removeItem("chatHistory"); - historyMessages = chatbotWrap.querySelector('.history-message'); - if (historyMessages) { - chatbotWrap.removeChild(historyMessages); - 
console.log("History Cleared"); - } -} -function emptyHistory() { - empty_botton.addEventListener("click", function () { - clearHistoryHtml(); - }); -} - -// 监视页面内部 DOM 变动 -var observer = new MutationObserver(function (mutations) { - gradioLoaded(mutations); -}); -observer.observe(targetNode, { childList: true, subtree: true }); - -// 监视页面变化 -window.addEventListener("DOMContentLoaded", function () { - isInIframe = (window.self !== window.top); - historyLoaded = false; - shouldRenderLatex = !!document.querySelector('script[src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-MML-AM_CHTML"]'); -}); -window.addEventListener('resize', setChatbotHeight); -window.addEventListener('scroll', setChatbotHeight); -window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode); - -// button svg code -const copyIcon = ''; -const copiedIcon = ''; -const mdIcon = ''; -const rawIcon = ''; diff --git a/spaces/scedlatioru/img-to-music/example/Tecnotest Reflex Plus 4130 Crack.md b/spaces/scedlatioru/img-to-music/example/Tecnotest Reflex Plus 4130 Crack.md deleted file mode 100644 index 157e6cf668fa74b188474f2b144f92931293baad..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Tecnotest Reflex Plus 4130 Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

        tecnotest reflex plus 4130 crack


        Download ★★★ https://gohhs.com/2uEzY7



        -
        -tecno reflex, tecno reflex plus 4130, tecno reflex plus 4130 preis, ... idea how to crack the activation code for my reflex 3130 from tecnotest . 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/segments-tobias/conex/espnet/bin/mt_train.py b/spaces/segments-tobias/conex/espnet/bin/mt_train.py deleted file mode 100644 index 7251617e0985565d8026ca8ab1d6c937c974289f..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/bin/mt_train.py +++ /dev/null @@ -1,480 +0,0 @@ -#!/usr/bin/env python3 -# encoding: utf-8 - -# Copyright 2019 Kyoto University (Hirofumi Inaguma) -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Neural machine translation model training script.""" - -import logging -import os -import random -import subprocess -import sys - -from distutils.version import LooseVersion - -import configargparse -import numpy as np -import torch - -from espnet import __version__ -from espnet.utils.cli_utils import strtobool -from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES - -is_torch_1_2_plus = LooseVersion(torch.__version__) >= LooseVersion("1.2") - - -# NOTE: you need this func to generate our sphinx doc -def get_parser(parser=None, required=True): - """Get default arguments.""" - if parser is None: - parser = configargparse.ArgumentParser( - description="Train a neural machine translation (NMT) model on one CPU, " - "one or multiple GPUs", - config_file_parser_class=configargparse.YAMLConfigFileParser, - formatter_class=configargparse.ArgumentDefaultsHelpFormatter, - ) - # general configuration - parser.add("--config", is_config_file=True, help="config file path") - parser.add( - "--config2", - is_config_file=True, - help="second config file path that overwrites the settings in `--config`.", - ) - parser.add( - "--config3", - is_config_file=True, - help="third config file path that overwrites the settings " - "in `--config` and `--config2`.", - ) - - parser.add_argument( - "--ngpu", - default=None, - type=int, - help="Number of GPUs. If not given, use all visible devices", - ) - parser.add_argument( - "--train-dtype", - default="float32", - choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"], - help="Data type for training (only pytorch backend). " - "O0,O1,.. flags require apex. 
" - "See https://nvidia.github.io/apex/amp.html#opt-levels", - ) - parser.add_argument( - "--backend", - default="chainer", - type=str, - choices=["chainer", "pytorch"], - help="Backend library", - ) - parser.add_argument( - "--outdir", type=str, required=required, help="Output directory" - ) - parser.add_argument("--debugmode", default=1, type=int, help="Debugmode") - parser.add_argument( - "--dict", required=required, help="Dictionary for source/target languages" - ) - parser.add_argument("--seed", default=1, type=int, help="Random seed") - parser.add_argument("--debugdir", type=str, help="Output directory for debugging") - parser.add_argument( - "--resume", - "-r", - default="", - nargs="?", - help="Resume the training from snapshot", - ) - parser.add_argument( - "--minibatches", - "-N", - type=int, - default="-1", - help="Process only N minibatches (for debug)", - ) - parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option") - parser.add_argument( - "--tensorboard-dir", - default=None, - type=str, - nargs="?", - help="Tensorboard log dir path", - ) - parser.add_argument( - "--report-interval-iters", - default=100, - type=int, - help="Report interval iterations", - ) - parser.add_argument( - "--save-interval-iters", - default=0, - type=int, - help="Save snapshot interval iterations", - ) - # task related - parser.add_argument( - "--train-json", - type=str, - default=None, - help="Filename of train label data (json)", - ) - parser.add_argument( - "--valid-json", - type=str, - default=None, - help="Filename of validation label data (json)", - ) - # network architecture - parser.add_argument( - "--model-module", - type=str, - default=None, - help="model defined module (default: espnet.nets.xxx_backend.e2e_mt:E2E)", - ) - # loss related - parser.add_argument( - "--lsm-weight", default=0.0, type=float, help="Label smoothing weight" - ) - # translations options to compute BLEU - parser.add_argument( - "--report-bleu", - default=True, - action="store_true", - help="Compute BLEU on development set", - ) - parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses") - parser.add_argument("--beam-size", type=int, default=4, help="Beam size") - parser.add_argument("--penalty", default=0.0, type=float, help="Incertion penalty") - parser.add_argument( - "--maxlenratio", - default=0.0, - type=float, - help="""Input length ratio to obtain max output length. - If maxlenratio=0.0 (default), it uses a end-detect function - to automatically find maximum hypothesis lengths""", - ) - parser.add_argument( - "--minlenratio", - default=0.0, - type=float, - help="Input length ratio to obtain min output length", - ) - parser.add_argument( - "--rnnlm", type=str, default=None, help="RNNLM model file to read" - ) - parser.add_argument( - "--rnnlm-conf", type=str, default=None, help="RNNLM model config file to read" - ) - parser.add_argument("--lm-weight", default=0.0, type=float, help="RNNLM weight.") - parser.add_argument("--sym-space", default="", type=str, help="Space symbol") - parser.add_argument("--sym-blank", default="", type=str, help="Blank symbol") - # minibatch related - parser.add_argument( - "--sortagrad", - default=0, - type=int, - nargs="?", - help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs", - ) - parser.add_argument( - "--batch-count", - default="auto", - choices=BATCH_COUNT_CHOICES, - help="How to count batch_size. 
" - "The default (auto) will find how to count by args.", - ) - parser.add_argument( - "--batch-size", - "--batch-seqs", - "-b", - default=0, - type=int, - help="Maximum seqs in a minibatch (0 to disable)", - ) - parser.add_argument( - "--batch-bins", - default=0, - type=int, - help="Maximum bins in a minibatch (0 to disable)", - ) - parser.add_argument( - "--batch-frames-in", - default=0, - type=int, - help="Maximum input frames in a minibatch (0 to disable)", - ) - parser.add_argument( - "--batch-frames-out", - default=0, - type=int, - help="Maximum output frames in a minibatch (0 to disable)", - ) - parser.add_argument( - "--batch-frames-inout", - default=0, - type=int, - help="Maximum input+output frames in a minibatch (0 to disable)", - ) - parser.add_argument( - "--maxlen-in", - "--batch-seq-maxlen-in", - default=100, - type=int, - metavar="ML", - help="When --batch-count=seq, " - "batch size is reduced if the input sequence length > ML.", - ) - parser.add_argument( - "--maxlen-out", - "--batch-seq-maxlen-out", - default=100, - type=int, - metavar="ML", - help="When --batch-count=seq, " - "batch size is reduced if the output sequence length > ML", - ) - parser.add_argument( - "--n-iter-processes", - default=0, - type=int, - help="Number of processes of iterator", - ) - # optimization related - parser.add_argument( - "--opt", - default="adadelta", - type=str, - choices=["adadelta", "adam", "noam"], - help="Optimizer", - ) - parser.add_argument( - "--accum-grad", default=1, type=int, help="Number of gradient accumuration" - ) - parser.add_argument( - "--eps", default=1e-8, type=float, help="Epsilon constant for optimizer" - ) - parser.add_argument( - "--eps-decay", default=0.01, type=float, help="Decaying ratio of epsilon" - ) - parser.add_argument( - "--lr", default=1e-3, type=float, help="Learning rate for optimizer" - ) - parser.add_argument( - "--lr-decay", default=1.0, type=float, help="Decaying ratio of learning rate" - ) - parser.add_argument( - "--weight-decay", default=0.0, type=float, help="Weight decay ratio" - ) - parser.add_argument( - "--criterion", - default="acc", - type=str, - choices=["loss", "acc"], - help="Criterion to perform epsilon decay", - ) - parser.add_argument( - "--threshold", default=1e-4, type=float, help="Threshold to stop iteration" - ) - parser.add_argument( - "--epochs", "-e", default=30, type=int, help="Maximum number of epochs" - ) - parser.add_argument( - "--early-stop-criterion", - default="validation/main/acc", - type=str, - nargs="?", - help="Value to monitor to trigger an early stopping of the training", - ) - parser.add_argument( - "--patience", - default=3, - type=int, - nargs="?", - help="Number of epochs to wait " - "without improvement before stopping the training", - ) - parser.add_argument( - "--grad-clip", default=5, type=float, help="Gradient norm threshold to clip" - ) - parser.add_argument( - "--num-save-attention", - default=3, - type=int, - help="Number of samples of attention to be saved", - ) - # decoder related - parser.add_argument( - "--context-residual", - default=False, - type=strtobool, - nargs="?", - help="The flag to switch to use context vector residual in the decoder network", - ) - parser.add_argument( - "--tie-src-tgt-embedding", - default=False, - type=strtobool, - nargs="?", - help="Tie parameters of source embedding and target embedding.", - ) - parser.add_argument( - "--tie-classifier", - default=False, - type=strtobool, - nargs="?", - help="Tie parameters of target embedding and output projection layer.", - ) - # 
finetuning related - parser.add_argument( - "--enc-init", - default=None, - type=str, - nargs="?", - help="Pre-trained ASR model to initialize encoder.", - ) - parser.add_argument( - "--enc-init-mods", - default="enc.enc.", - type=lambda s: [str(mod) for mod in s.split(",") if s != ""], - help="List of encoder modules to initialize, separated by a comma.", - ) - parser.add_argument( - "--dec-init", - default=None, - type=str, - nargs="?", - help="Pre-trained ASR, MT or LM model to initialize decoder.", - ) - parser.add_argument( - "--dec-init-mods", - default="att., dec.", - type=lambda s: [str(mod) for mod in s.split(",") if s != ""], - help="List of decoder modules to initialize, separated by a comma.", - ) - # multilingual related - parser.add_argument( - "--multilingual", - default=False, - type=strtobool, - help="Prepend target language ID to the source sentence. " - "Both source/target language IDs must be prepend in the pre-processing stage.", - ) - parser.add_argument( - "--replace-sos", - default=False, - type=strtobool, - help="Replace in the decoder with a target language ID " - "(the first token in the target sequence)", - ) - - return parser - - -def main(cmd_args): - """Run the main training function.""" - parser = get_parser() - args, _ = parser.parse_known_args(cmd_args) - if args.backend == "chainer" and args.train_dtype != "float32": - raise NotImplementedError( - f"chainer backend does not support --train-dtype {args.train_dtype}." - "Use --dtype float32." - ) - if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"): - raise ValueError( - f"--train-dtype {args.train_dtype} does not support the CPU backend." - ) - - from espnet.utils.dynamic_import import dynamic_import - - if args.model_module is None: - model_module = "espnet.nets." + args.backend + "_backend.e2e_mt:E2E" - else: - model_module = args.model_module - model_class = dynamic_import(model_module) - model_class.add_arguments(parser) - - args = parser.parse_args(cmd_args) - args.model_module = model_module - if "chainer_backend" in args.model_module: - args.backend = "chainer" - if "pytorch_backend" in args.model_module: - args.backend = "pytorch" - - # add version info in args - args.version = __version__ - - # logging info - if args.verbose > 0: - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", - ) - else: - logging.basicConfig( - level=logging.WARN, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", - ) - logging.warning("Skip DEBUG/INFO messages") - - # If --ngpu is not given, - # 1. if CUDA_VISIBLE_DEVICES is set, all visible devices - # 2. if nvidia-smi exists, use all devices - # 3. 
else ngpu=0 - if args.ngpu is None: - cvd = os.environ.get("CUDA_VISIBLE_DEVICES") - if cvd is not None: - ngpu = len(cvd.split(",")) - else: - logging.warning("CUDA_VISIBLE_DEVICES is not set.") - try: - p = subprocess.run( - ["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE - ) - except (subprocess.CalledProcessError, FileNotFoundError): - ngpu = 0 - else: - ngpu = len(p.stderr.decode().split("\n")) - 1 - args.ngpu = ngpu - else: - if is_torch_1_2_plus and args.ngpu != 1: - logging.debug( - "There are some bugs with multi-GPU processing in PyTorch 1.2+" - + " (see https://github.com/pytorch/pytorch/issues/21108)" - ) - ngpu = args.ngpu - logging.info(f"ngpu: {ngpu}") - - # display PYTHONPATH - logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)")) - - # set random seed - logging.info("random seed = %d" % args.seed) - random.seed(args.seed) - np.random.seed(args.seed) - - # load dictionary for debug log - if args.dict is not None: - with open(args.dict, "rb") as f: - dictionary = f.readlines() - char_list = [entry.decode("utf-8").split(" ")[0] for entry in dictionary] - char_list.insert(0, "") - char_list.append("") - args.char_list = char_list - else: - args.char_list = None - - # train - logging.info("backend = " + args.backend) - - if args.backend == "pytorch": - from espnet.mt.pytorch_backend.mt import train - - train(args) - else: - raise ValueError("Only pytorch are supported.") - - -if __name__ == "__main__": - main(sys.argv[1:]) diff --git a/spaces/segments-tobias/conex/espnet2/iterators/abs_iter_factory.py b/spaces/segments-tobias/conex/espnet2/iterators/abs_iter_factory.py deleted file mode 100644 index 36e4dd2c52133fdabf02c94b268a80f6bb7dfd9a..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/iterators/abs_iter_factory.py +++ /dev/null @@ -1,9 +0,0 @@ -from abc import ABC -from abc import abstractmethod -from typing import Iterator - - -class AbsIterFactory(ABC): - @abstractmethod - def build_iter(self, epoch: int, shuffle: bool = None) -> Iterator: - raise NotImplementedError diff --git a/spaces/shi-labs/OneFormer/oneformer/modeling/pixel_decoder/__init__.py b/spaces/shi-labs/OneFormer/oneformer/modeling/pixel_decoder/__init__.py deleted file mode 100644 index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/OneFormer/oneformer/modeling/pixel_decoder/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/spaces/shibing624/ChatPDF/Dockerfile b/spaces/shibing624/ChatPDF/Dockerfile deleted file mode 100644 index 335c2dba28ba8c365de9306858462a59dea25f28..0000000000000000000000000000000000000000 --- a/spaces/shibing624/ChatPDF/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.9 as builder -RUN apt-get update && apt-get install -y build-essential -COPY requirements.txt . -COPY requirements_advanced.txt . -RUN pip install --user -r requirements.txt -# RUN pip install --user -r requirements_advanced.txt - -FROM python:3.9 -MAINTAINER iskoldt -COPY --from=builder /root/.local /root/.local -ENV PATH=/root/.local/bin:$PATH -COPY . 
/app -WORKDIR /app -ENV dockerrun yes -CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"] diff --git a/spaces/shikunl/prismer/app_vqa.py b/spaces/shikunl/prismer/app_vqa.py deleted file mode 100644 index 9c4c2334d5050e59f78d3064aaf30fbfe76f8c36..0000000000000000000000000000000000000000 --- a/spaces/shikunl/prismer/app_vqa.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import os -import pathlib -import gradio as gr - -from prismer_model import Model - - -def create_demo() -> gr.Blocks: - model = Model() - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - image = gr.Image(label='Input', type='filepath') - model_name = gr.Dropdown(label='Model', choices=['Prismer-Base', 'Prismer-Large'], value='Prismer-Base') - question = gr.Text(label='Question') - run_button = gr.Button('Run') - with gr.Column(scale=1.5): - answer = gr.Text(label='Model Prediction') - with gr.Row(): - depth = gr.Image(label='Depth') - edge = gr.Image(label='Edge') - normals = gr.Image(label='Normals') - with gr.Row(): - segmentation = gr.Image(label='Segmentation') - object_detection = gr.Image(label='Object Detection') - ocr = gr.Image(label='OCR Detection') - - inputs = [image, model_name, question] - outputs = [answer, depth, edge, normals, segmentation, object_detection, ocr] - - paths = sorted(pathlib.Path('prismer/images').glob('*')) - ex_questions = ['What is the man on the left doing?', - 'What is this person doing?', - 'How many cows are in this image?', - 'What is the type of animal in this image?', - 'What toy is it?'] - examples = [[path.as_posix(), 'Prismer-Base', ex_questions[i]] for i, path in enumerate(paths)] - gr.Examples(examples=examples, - inputs=inputs, - outputs=outputs, - fn=model.run_vqa, - cache_examples=os.getenv('SYSTEM') == 'spaces') - - run_button.click(fn=model.run_vqa, inputs=inputs, outputs=outputs) - return demo - - -if __name__ == '__main__': - demo = create_demo() - demo.queue().launch() diff --git a/spaces/shiyi11/QQsign/README.md b/spaces/shiyi11/QQsign/README.md deleted file mode 100644 index 3042be806844c4b6d92719e8afaa17d09c970d46..0000000000000000000000000000000000000000 --- a/spaces/shiyi11/QQsign/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: QQsign -emoji: 🦀 -colorFrom: blue -colorTo: purple -sdk: docker -pinned: false -license: mit -duplicated_from: CikeyQI/QQsign ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/sidharthism/fashion-eye/models/stylegan/__init__.py b/spaces/sidharthism/fashion-eye/models/stylegan/__init__.py deleted file mode 100644 index 6edf9b7e860d2b45ed1ccf40223c6fac0b273ab7..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/models/stylegan/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 Erik Härkönen. All rights reserved. -# This file is licensed to you under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS -# OF ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. 
- -from pathlib import Path -import sys - -#module_path = Path(__file__).parent / 'pytorch_biggan' -#sys.path.append(str(module_path.resolve())) - -from .model import StyleGAN_G, NoiseLayer \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Does Genshin Impact allow background downloading? A guide for PC and mobile players.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Does Genshin Impact allow background downloading? A guide for PC and mobile players.md deleted file mode 100644 index e6d8b6ecba9dd20e9987d8686d4794245e43f6c8..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Does Genshin Impact allow background downloading? A guide for PC and mobile players.md +++ /dev/null @@ -1,99 +0,0 @@ -
        -

        Does Genshin Impact Download in the Background?

        -

        Genshin Impact is a popular open-world action role-playing game that has attracted millions of players since its release in September 2020. The game features a vast fantasy world with beautiful graphics, engaging gameplay, and a rich story. However, downloading Genshin Impact can be a challenge for some players, as it requires a large amount of storage space and internet bandwidth. Depending on your device and connection speed, it can take several hours or even days to complete the download.

        -




        -

        Some players may wonder if they can download Genshin Impact in the background while doing other things on their devices, such as browsing the web, watching videos, or playing other games. In this article, we will answer this question and provide some tips on how to download Genshin Impact faster and safer on different platforms.

        -

        How to Download Genshin Impact on Different Platforms

        -

        PC

        -

        To download Genshin Impact on PC, you need to visit the official website (https://genshin.mihoyo.com/en/download) and click on "Windows" to download the launcher. After installing the launcher, you need to run it and log in with your account. Then you can click on "Get Game" to start downloading Genshin Impact.

        -

        If you want to pause or resume the download, you can click on "Pause" or "Resume" at any time. You can also cancel the download by clicking on "Cancel". However, this will delete all the downloaded files and you will have to start over.

        -

        -

        Mobile

        -

        To download Genshin Impact on mobile devices, you need to visit the App Store or Google Play Store and search for "Genshin Impact". Then you can tap on "Get" or "Install" to start downloading the game.

        -

        If you want to pause or resume the download, you can tap on "Pause" or "Resume" at any time. You can also cancel the download by tapping on "Cancel". However, this will delete all the downloaded files and you will have to start over.

        -

        PlayStation 4/5

        -

        To download Genshin Impact on PlayStation 4/5, you need to visit the PlayStation Store and search for "Genshin Impact". Then you can select "Download" to start downloading the game.

        -

        If you want to pause or resume the download, you can go to the "Notifications" menu and select "Downloads". Then you can press the "X" button to pause or resume the download. You can also cancel the download by pressing the "Options" button and selecting "Cancel and Delete". However, this will delete all the downloaded files and you will have to start over.

        -

        How to Download Genshin Impact in the Background

        -

        PC

        -

        Genshin Impact can be downloaded in the background on PC by minimizing the launcher window. This will allow you to do other tasks on your PC while the game is downloading. However, you should be aware that downloading Genshin Impact in the background may slow down your PC performance and internet speed, depending on your system specifications and network conditions.

        -

        To check the download progress, you can hover your mouse over the launcher icon on the taskbar. You will see a percentage and a speed indicator. You can also restore the launcher window by clicking on the icon.

        -

        Mobile

        -

        Genshin Impact can be downloaded in the background on mobile devices by switching to another app. This will allow you to do other things on your phone or tablet while the game is downloading. However, you should be aware that downloading Genshin Impact in the background may drain your battery life and use up your mobile data, depending on your device settings and network conditions.

        -

        To check the download progress, you can swipe down from the top of your screen to access the notification panel. You will see a percentage and a speed indicator. You can also tap on the notification to resume or pause the download.

        -

        PlayStation 4/5

        -

        Genshin Impact can be downloaded in the background on PlayStation 4/5 by putting the console in rest mode. This will allow you to save power and reduce noise while the game is downloading. However, you should be aware that downloading Genshin Impact in the background may take longer than usual, depending on your console settings and network conditions.

        -

        To check the download progress, you can turn on your console and go to the "Notifications" menu. You will see a percentage and a speed indicator. You can also resume or pause the download from there.

        -

        Benefits of Downloading Genshin Impact in the Background

        -

        Downloading Genshin Impact in the background has some benefits that may appeal to some players, such as:

        -
          -
        • Saving time: You don't have to wait for the download to finish before doing other things on your device or console. You can multitask and enjoy other activities while Genshin Impact is downloading in the background.
        • Avoiding interruptions: You don't have to worry about losing your download progress if you have to close or restart your device or console for some reason. You can resume your download from where you left off when you come back.
        • Reducing bandwidth usage: You don't have to compete with other users or devices for internet speed if you download Genshin Impact in the background. You can reduce your bandwidth usage and avoid lagging or buffering issues when using other online services.
        -

        Drawbacks of Downloading Genshin Impact in the Background

        -

        Downloading Genshin Impact in the background also has some drawbacks that may discourage some players, such as:

        -
          -
        • Slowing down other tasks: Downloading Genshin Impact in the background may affect your device or console performance and internet speed when doing other tasks. You may experience lagging, freezing, crashing, or loading issues when using other apps or games.
        • Draining battery life: Downloading Genshin Impact in the background may consume more power than usual when your device or console is running. You may need to charge your device or console more often or plug it into a power source when downloading Genshin Impact in the background.
        • Risking data loss: Downloading Genshin Impact in the background may expose your device or console to potential data loss if something goes wrong during the download process. You may encounter errors, glitches, corruption, or deletion of files when downloading Genshin Impact in the background.
        -

        Tips for Downloading Genshin Impact Faster and Safer

        -

        If you want to download Genshin Impact faster and safer, whether in the background or not, you can follow some of these tips:

        -
          -
        • Use a wired connection: If possible, use a wired connection instead of a wireless one when downloading Genshin Impact. This can improve your download speed and stability, as well as reduce the risk of interference or signal loss.
        • Close other apps: If you have other apps or games running on your device or console, close them before or during the download process. This can free up some memory and CPU resources, as well as reduce the bandwidth usage.
        • Check for errors: If you encounter any errors or problems during the download process, such as slow speed, stuck progress, or failed verification, check for possible causes and solutions. You can visit the official website (https://genshin.mihoyo.com/en/news) or the official forums (https://forums.mihoyo.com/genshin) for more information and support.
        • Verify files: After the download is complete, verify the integrity of the game files to make sure they are not corrupted or missing. You can do this by clicking on "Verify File Integrity" on the launcher window for PC, or by selecting "Check for Update" on the game icon for PlayStation 4/5.
        -

        Conclusion

        -

        Genshin Impact is a great game that can provide hours of fun and adventure. However, downloading it can be a hassle for some players, especially if they have limited storage space or internet bandwidth. Fortunately, Genshin Impact can be downloaded in the background on different platforms, allowing players to do other things while waiting for the game to finish downloading.

        -

        However, downloading Genshin Impact in the background also has some drawbacks, such as slowing down other tasks, draining battery life, and risking data loss. Therefore, players should weigh the pros and cons of downloading Genshin Impact in the background before deciding whether to do so or not.

        -

        We hope this article has answered your question of whether Genshin Impact can be downloaded in the background, and provided some tips on how to download it faster and safer. If you have any thoughts or questions about this topic, feel free to share them in the comments section below. Happy gaming!

        -

        FAQs

        -

        How big is Genshin Impact?

        -

        Genshin Impact is about 30 GB on PC, 9 GB on mobile devices, and 12 GB on PlayStation 4/5. However, these sizes may vary depending on updates and patches.

        -

        How long does it take to download Genshin Impact?

        -

        The download time of Genshin Impact depends on your device and connection speed. On average, it may take about 3 hours on PC, 1 hour on mobile devices, and 2 hours on PlayStation 4/5. However, these times may vary depending on network conditions and download settings.
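
        As a rough sanity check on these estimates (assuming, purely for illustration, an average download speed of about 25 Mbps, which is an assumption rather than an official figure): the roughly 30 GB PC version corresponds to 30 × 8,000 = 240,000 megabits, and 240,000 / 25 ≈ 9,600 seconds, or about 2.7 hours, which is in line with the 3-hour figure above. A faster or slower connection scales this time proportionally.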

        -

        Can I play Genshin Impact offline?

        -

        No, you cannot play Genshin Impact offline. You need an internet connection to access the game servers and play the game. You also need an internet connection to update the game and receive new content.

        -

        Can I play Genshin Impact with friends?

        -

        Yes, you can play Genshin Impact with friends. You can invite up to three friends to join your world and explore together. You can also join other players' worlds and participate in co-op events and quests.

        -

        Can I transfer my Genshin Impact progress between platforms?

        -

        Yes, you can transfer your Genshin Impact progress between platforms. You just need to log in with the same account on different devices or consoles. However, you cannot transfer your progress between different servers (such as Asia, Europe, America, etc.).

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Free Game for PC - The Latest and Greatest Releases.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Free Game for PC - The Latest and Greatest Releases.md deleted file mode 100644 index 952ac4ca1b5afc318d064807c4f5840dea7e4f28..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Free Game for PC - The Latest and Greatest Releases.md +++ /dev/null @@ -1,101 +0,0 @@ -
        -

        Download Free Game for PC: How to Find and Play the Best Games Online

        -

        Are you looking for some fun and exciting games to play on your PC without spending a dime? If so, you are in luck! There are plenty of free games for PC that you can download and enjoy online. Whether you are into racing, shooting, building, or exploring, there is a game for you. In this article, we will show you what are free games for PC, why you should play them, and how to download them. We will also give you some examples of the top free games for PC that you can try right now. Let's get started!

        -

        Introduction

        -

        What are free games for PC?

        -

        Free games for PC are games that you can play on your computer without paying any money. They are usually available online, either on the official website of the game developer or publisher, or on a platform like Steam, Epic Games Store, or Microsoft Store. Some free games for PC are completely free, meaning that you can access all the features and content without any restrictions. Others are free-to-play, meaning that you can play the basic game for free, but you may need to pay for some optional items or upgrades.

        -




        -

        Why play free games for PC?

        -

        There are many reasons why you might want to play free games for PC. Here are some of them:

        -
          -
        • You can save money. Playing free games for PC means that you don't have to spend any money on buying or renting games. You can use your money for other things, like buying a better PC or upgrading your hardware.
        • You can have fun. Playing free games for PC can be very entertaining and enjoyable. You can find games that suit your preferences and interests, whether you like action, adventure, strategy, puzzle, or anything else. You can also play with your friends or other players online, and have a social experience.
        • You can learn new skills. Playing free games for PC can help you improve your cognitive abilities, such as memory, concentration, problem-solving, creativity, and more. You can also learn new things about different topics, such as history, science, culture, and more.
        -

        How to download free games for PC?

        -

        Downloading free games for PC is not difficult. You just need to follow these steps:

        -
          -
        1. Find a game that you want to play. You can search online for the best free games for PC, or browse through the categories and genres on different platforms.
        2. Check the system requirements of the game. Before you download a game, make sure that your PC meets the minimum or recommended specifications of the game. This will ensure that the game runs smoothly and without any issues.
        3. Download the game. Depending on where you get the game from, you may need to create an account or sign in with an existing one. Then, you can click on the download button and follow the instructions.
        4. Install and launch the game. Once the download is complete, you may need to install the game on your PC by running the setup file or following the instructions. Then, you can launch the game and start playing.
        -

        Top Free Games for PC

        -

        Now that you know how to download free games for PC, let's take a look at some of the top free games for PC that you can play right now. We have selected three popular and high-quality games that cover different genres and styles.

        -

        Asphalt 9: Legends

        -

        Features

        -

        If you love racing games, then Asphalt 9: Legends is a must-try. This game lets you drive some of the most amazing and exotic cars in the world, such as Ferrari, Lamborghini, Porsche, and more. You can customize your cars, race against other players online, and perform stunning stunts and drifts. The game features stunning graphics, realistic physics, and a variety of tracks and modes.

        -

        How to download

        -

        You can download Asphalt 9: Legends for free from the Microsoft Store. You will need Windows 10 or higher and at least 4 GB of RAM to play the game. The game also supports Xbox Live integration, so you can earn achievements and compete with your friends.

        -

        Roblox

        -

        Features

        -

        If you are into building and creating games, then Roblox is the perfect game for you. Roblox is a platform where you can play millions of games created by other users, or create your own games using the Roblox Studio. You can also chat and socialize with other players, join groups, and customize your avatar. Roblox is a game for all ages and interests, as you can find games in genres like adventure, role-playing, simulation, horror, and more.

        -

        How to download

        -

        You can download Roblox for free from the official website. You will need to create an account or sign in with an existing one to play the game. You can also download the Roblox Studio from the same website if you want to create your own games. Roblox is compatible with Windows 7 or higher and requires at least 1 GB of RAM.

        -

        -

        Fortnite

        -

        Features

        -

        If you are a fan of shooting and survival games, then Fortnite is a game that you should not miss. Fortnite is a game that combines building, crafting, and combat in a colorful and cartoonish world. You can play solo or team up with your friends or other players online, and fight against zombies or other players in different modes. The most popular mode is Battle Royale, where 100 players compete to be the last one standing. Fortnite is constantly updated with new content, events, and features.

        -

        How to download

        -

        You can download Fortnite for free from the Epic Games Store. You will need to create an account or sign in with an existing one to play the game. You will also need Windows 7 or higher and at least 4 GB of RAM to run the game. Fortnite also supports cross-play with other platforms, such as PlayStation, Xbox, Nintendo Switch, and mobile devices.

        -

        Conclusion

        -

        As you can see, there are many free games for PC that you can download and play online. Whether you want to race, build, shoot, or explore, there is a game for you. All you need is a PC that meets the system requirements of the game, an internet connection, and a sense of adventure. So what are you waiting for? Download your favorite free game for PC today and have fun!

        -

        FAQs

        -
          -
        • What are some other free games for PC that I can play?
          Some other free games for PC that you can play are Apex Legends, Valorant, League of Legends, Genshin Impact, Warframe, Among Us, and more.
        • Are free games for PC safe to download?
          Most free games for PC are safe to download as long as you get them from reputable sources, such as the official websites or platforms of the game developers or publishers. However, you should always be careful of malware or viruses that may come with some downloads. You should also read the terms and conditions and privacy policies of the games before downloading them.
        • Do I need an internet connection to play free games for PC?
          Some free games for PC require an internet connection to play online with other players or access some features or content. However, some free games for PC also have offline modes that allow you to play without an internet connection.
        • Can I play free games for PC on my laptop?
          Yes, you can play free games for PC on your laptop as long as it meets the system requirements of the game. However, some laptops may not have the best performance or graphics quality compared to desktop PCs.
        • Can I use a controller to play free games for PC?
          Some free games for PC support controller input, while others only support keyboard and mouse input. You should check the settings or options of the game to see if it supports controller input. You may also need to connect your controller to your PC using a USB cable or a wireless adapter.
        -

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Ya Tarim by Ai Khodijah - Download Now in MP3 Format.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Ya Tarim by Ai Khodijah - Download Now in MP3 Format.md deleted file mode 100644 index 21b932b0c467ec29fed01934db616a350d66e32e..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Ya Tarim by Ai Khodijah - Download Now in MP3 Format.md +++ /dev/null @@ -1,151 +0,0 @@ - -

        Download Ai Khodijah Ya Tarim: A Guide for Music Lovers

        -

        If you are looking for a beautiful and inspiring song to enrich your soul, you might want to check out Ai Khodijah Ya Tarim. This is a cover version of a popular qasidah (a form of Islamic poetry) by a talented Indonesian singer named Ai Khodijah. In this article, we will tell you everything you need to know about this song, including its meaning, history, and how to download it legally and safely. Whether you are a fan of sholawat (a type of Islamic devotional music) or just curious about new musical genres, you will find something interesting and useful in this guide.

        -

        Who is Ai Khodijah and what is her music style?

        -

        Ai Khodijah is a young female singer from Indonesia who specializes in sholawat music. She was born in Jakarta in 1997 and started singing at the age of five. She joined a vocal group called El-Mighwar in 2015 and gained popularity through YouTube. She has released several albums and singles, such as Sholawat Nabi, Sholawat Merdu, Sholawat Terbaru, and Sholawat Penyejuk Hati. She is also known for her collaborations with other sholawat singers, such as Nissa Sabyan, Mohamed Tarek, Mohamed Youssef, Azmi Alkatiri, and Rizal Vertizone.

        -




        -

        Sholawat is a genre of music that expresses love and praise for Allah (God) and His Messenger Muhammad (peace be upon him). It is derived from the Arabic word salawat, which means "blessings". Sholawat music usually consists of reciting or singing verses from the Quran (the holy book of Islam), hadith (the sayings and deeds of Muhammad), or qasidah. Sholawat music can be performed in various languages, such as Arabic, Indonesian, Malay, Urdu, Turkish, Persian, or English. Sholawat music can have different styles, such as traditional, modern, pop, rock, jazz, or rap. Sholawat music is popular among Muslims around the world, especially in Indonesia, Malaysia, Egypt, Morocco, Turkey, India, Pakistan, Bangladesh, and Nigeria.

        -

        What is the meaning and history of Ya Tarim?

        -

        Ya Tarim is a song that praises the city of Tarim, which is located in the Hadhramaut region of Yemen. Tarim is known as the "city of scholars" because it has produced many eminent Islamic scholars, such as Imam al-Haddad, Imam al-Shafi'i, Imam al-Ghazali, and Imam al-Busiri. Tarim is also a center of Sufism, a mystical branch of Islam that emphasizes the inner and personal relationship with Allah. Tarim is considered a sacred and blessed place by many Muslims, especially those who follow the Ba 'Alawiyya tariqa (a Sufi order that originated from Tarim).

        -

        The lyrics of Ya Tarim are in Arabic and Indonesian, and they express the longing and admiration for Tarim and its people. Here are the lyrics and their translation:

        | Arabic | Indonesian | English |
        | --- | --- | --- |
        | يا طريم يا طريم يا طريم | Ya Tarim ya Tarim ya Tarim | O Tarim, O Tarim, O Tarim |
        | يا مدينة العلماء والأولياء | Ya madinatul ulama wal auliya | O city of scholars and saints |
        | يا مدينة الحب والسلام والأنس | Ya madinatul hubbi was salami wal uns | O city of love, peace, and intimacy |
        | يا مدينة النور والهدى والرحمة | Ya madinatun nur wal huda war rahmah | O city of light, guidance, and mercy |
        | يا طريم يا طريم يا طريم | Ya Tarim ya Tarim ya Tarim | O Tarim, O Tarim, O Tarim |
        | كم أشتاق إليك يا طريم يا طريم | Kam asytaqu ilaika ya Tarim ya Tarim | How I miss you, O Tarim, O Tarim |
        | كم أحبك يا طريم يا طريم يا طريم | Kam uhibbuka ya Tarim ya Tarim ya Tarim | How I love you, O Tarim, O Tarim, O Tarim |
        | كم أنت جميل يا طريم يا طريم يا طريم | Kam anta jamil ya Tarim ya Tarim ya Tarim | How beautiful you are, O Tarim, O Tarim, O Tarim |
        | ... | ... | ... |

        The full lyrics can be found here.
        -

        The author and original singer of Ya Tarim is unknown, but it is believed that it was composed by one of the Ba 'Alawiyya Sufis who lived in or visited Tarim. The song has been sung by many sholawat singers over the years, such as Habib Syech bin Abdul Qodir Assegaf, Habib Ali al-Habsyi, Habib Umar bin Hafidz, Habib Anis bin Alwi al-Habsyi, and Habib Ja'far bin Umar al-Jufri. The most recent and popular version of Ya Tarim is by Ai Khodijah, who released it in 2020.

        -

        Download Ya Tarim by Ai Khodijah in Hi-Res quality
        -Stream and download Ya Tarim by Ai Khodijah on Qobuz
        -Download Mp3 Ai Khodijah - Ya Tarim gratis
        -Download lagu Ai Khodijah - Ya Tarim terbaru
        -Listen to Ya Tarim by Ai Khodijah online
        -Download Ya Tarim by Ai Khodijah on iTunes
        -Download and streaming Ya Tarim by Ai Khodijah in high quality
        -Download lagu Ai Khodijah - Ya Tarim original
        -Download Ya Tarim by Ai Khodijah for free
        -Download Mp3 Ai Khodijah - Ya Tarim full album
        -Download lagu Ai Khodijah - Ya Tarim mp3
        -Download Ya Tarim by Ai Khodijah on Spotify
        -Download and listen to Ya Tarim by Ai Khodijah offline
        -Download lagu Ai Khodijah - Ya Tarim lirik
        -Download Ya Tarim by Ai Khodijah on Amazon Music
        -Download Mp3 Ai Khodijah - Ya Tarim 320kbps
        -Download lagu Ai Khodijah - Ya Tarim cover
        -Download Ya Tarim by Ai Khodijah on YouTube Music
        -Download and share Ya Tarim by Ai Khodijah with friends
        -Download lagu Ai Khodijah - Ya Tarim karaoke

        -

        The message of Ya Tarim is to inspire Muslims to seek knowledge and spirituality from the legacy of Tarim and its scholars. It also encourages Muslims to love and respect each other, regardless of their differences in ethnicity, culture, or sect. Ya Tarim is a song that celebrates the diversity and unity of the Muslim ummah (community).

        -

        How to download Ai Khodijah Ya Tarim legally and safely?

        -

        If you want to download Ai Khodijah Ya Tarim legally and safely, you have several options. You can either buy the song from an official online store, such as iTunes, Spotify, Amazon Music, or Google Play Music, or you can download it from YouTube using a third-party tool or website. However, you should be aware of the pros and cons of each option and follow some tips to avoid any problems.

        -

        Buying the song from an official online store

        -

        The best way to download Ai Khodijah Ya Tarim legally and safely is to buy it from an official online store. This way, you will support the artist and the music industry, and you will get a high-quality and virus-free MP3 file. You will also have access to other features, such as lyrics, album art, playlists, and offline listening. However, this option might cost you some money, depending on the store and the region. You will also need to create an account and install an app on your device to use this option.

        -

        Steps to buy Ai Khodijah Ya Tarim from an official online store

        -
          -
          1. Choose an online store that suits your preferences and budget. Some of the most popular ones are iTunes, Spotify, Amazon Music, and Google Play Music.
          2. Create an account on the online store if you don't have one already. You might need to provide some personal information, such as your name, email address, phone number, and payment method.
          3. Search for Ai Khodijah Ya Tarim on the online store. You can use the search bar or browse through the categories and genres.
          4. Select the song and click on the buy or download button. You might need to confirm your purchase and enter your password or PIN.
          5. Wait for the song to be downloaded to your device. You can check the progress and status of the download on the app or the website.
          6. Enjoy listening to Ai Khodijah Ya Tarim on your device. You can also transfer it to other devices or share it with your friends.
        -

        Downloading the song from YouTube using a third-party tool or website

        -

        Another way to download Ai Khodijah Ya Tarim legally and safely is to use a third-party tool or website that allows you to convert YouTube videos into MP3 files. This way, you can get the song for free and without installing any app on your device. You can also choose the quality and format of the MP3 file according to your needs. However, this option might violate YouTube's terms of service and infringe on the artist's rights. You might also encounter some risks, such as malware, pop-ups, ads, or scams. You should be careful and selective when using this option.

        -

        Steps to download Ai Khodijah Ya Tarim from YouTube using a third-party tool or website

        -
          -
          1. Find a reliable and reputable third-party tool or website that can convert YouTube videos into MP3 files. Some of the most popular ones are ytmp3.cc, y2mate.com, flvto.biz, and mp3juices.cc.
          2. Go to YouTube and search for Ai Khodijah Ya Tarim. You can use the search bar or browse through the channels and playlists.
          3. Select the video that contains the song and copy its URL. You can do this by right-clicking on the video and choosing copy link address or by copying it from the address bar.
          4. Paste the URL into the third-party tool or website and click on the convert or download button. You might need to choose the quality and format of the MP3 file before converting it.
          5. Wait for the conversion process to finish. You can check the progress and status of the conversion on the tool or website.
          6. Download the MP3 file to your device. You can do this by clicking on the download button or by right-clicking on the file and choosing save as.
          7. Enjoy listening to Ai Khodijah Ya Tarim on your device. You can also transfer it to other devices or share it with your friends.
        -

        Tips for downloading music from YouTube

        -
          -
          • Always use a trusted and secure third-party tool or website that has positive reviews and ratings from other users.
          • Avoid clicking on any suspicious links, pop-ups, ads, or downloads that might contain malware or viruses.
          • Use a VPN (virtual private network) or a proxy server to hide your IP address and location when downloading music from YouTube.
          • Delete any unwanted or unnecessary files that might take up space on your device or slow down its performance.
          • Respect the artist's rights and do not distribute or sell their music without their permission.

          How to enjoy Ai Khodijah Ya Tarim offline and online?

          -

          Once you have downloaded Ai Khodijah Ya Tarim, you can enjoy listening to it offline and online. However, there are some differences between the two modes of listening that you should be aware of. Here are some of the benefits and drawbacks of each mode:

          -

          Listening to Ai Khodijah Ya Tarim offline

          -

          Listening to Ai Khodijah Ya Tarim offline means that you can play the song on your device without an internet connection. This can be convenient and cost-effective, especially if you have a limited or expensive data plan. You can also listen to the song anytime and anywhere, without worrying about buffering or interruptions. However, listening to the song offline also has some disadvantages, such as:

          -
            -
          • You might not be able to access the latest updates or versions of the song, such as new lyrics, translations, or remixes.
          • You might not be able to interact with other fans or followers of the song, such as leaving comments, likes, or shares.
          • You might not be able to discover new songs or artists that are related or similar to Ai Khodijah Ya Tarim.
          -

          Listening to Ai Khodijah Ya Tarim online

          -

          Listening to Ai Khodijah Ya Tarim online means that you can stream the song on your device using an internet connection. This can be exciting and enjoyable, especially if you want to stay updated and connected with the song and its community. You can also explore new songs or artists that are recommended or suggested by the platform or the algorithm. However, listening to the song online also has some drawbacks, such as:

          -
            -
          • You might need a stable and fast internet connection to stream the song smoothly and without delays.
          • You might consume a lot of data or bandwidth to stream the song, which can be expensive or limited depending on your plan.
          • You might encounter some ads or pop-ups that might interrupt or distract you from the song.
          -

          Recommendations for other songs and artists

          -

          If you enjoy listening to Ai Khodijah Ya Tarim, you might also like some of these other songs and artists that are similar or related to it:

          -
            -
          • Ya Habibal Qolbi by Nissa Sabyan: This is another popular sholawat song by a famous Indonesian singer who is known for her beautiful voice and style. The song praises Muhammad (peace be upon him) as the beloved of Allah and the heart of the believers.
          • Ya Nabi Salam Alayka by Maher Zain: This is a modern sholawat song by a renowned Swedish-Lebanese singer who is known for his inspirational and uplifting music. The song greets and salutes Muhammad (peace be upon him) as the messenger of peace and mercy.
          • Ya Badrotim by Rijal Vertizone: This is a traditional sholawat song by an Indonesian group who is known for their authentic and harmonious performance. The song describes Muhammad (peace be upon him) as the full moon that illuminates the darkness.
          -

          Conclusion

          -

          In conclusion, Ai Khodijah Ya Tarim is a wonderful and meaningful song that you should listen to if you love sholawat music or want to learn more about it. The song has a rich history and message that relates to Tarim, a city of scholars and saints in Yemen. You can download the song legally and safely from an official online store or from YouTube using a third-party tool or website. You can also enjoy the song offline and online, depending on your preferences and circumstances. We hope that this guide has helped you understand and appreciate Ai Khodijah Ya Tarim better. If you have any questions or feedback, please feel free to share them with us in the comments section below.

          -

          Frequently Asked Questions

          -
            -
          1. What is the difference between qasidah and sholawat?

             Qasidah is a general term for Islamic poetry that can have various themes, such as praise, love, advice, or lament. Sholawat is a specific type of qasidah that focuses on praising Allah and His Messenger Muhammad (peace be upon him).

          2. What is the difference between Sufism and Sunni Islam?

             Sufism is a branch of Islam that emphasizes the mystical and personal relationship with Allah. Sunni Islam is a branch of Islam that follows the teachings and traditions of Muhammad (peace be upon him) and his companions. Sufism and Sunni Islam are not mutually exclusive, and many Muslims follow both aspects of Islam.

          3. What is the difference between Tarim and Hadhramaut?

             Tarim is a city in the Hadhramaut region of Yemen. Hadhramaut is a historical and geographical region that covers parts of Yemen, Saudi Arabia, and Oman. Hadhramaut is known for its natural beauty, cultural diversity, and Islamic heritage.

          4. What is the difference between MP3 and MP4?

             MP3 and MP4 are two types of file formats that are used to store audio and video data. MP3 stands for MPEG-1 Audio Layer 3, and it is a compressed audio format that reduces the file size without losing much quality. MP4 stands for MPEG-4 Part 14, and it is a multimedia container format that can store audio, video, subtitles, images, and other data.

          5. What is the difference between VPN and proxy?

             VPN and proxy are two types of services that can help you access the internet anonymously and securely. VPN stands for virtual private network, and it creates a secure and encrypted connection between your device and a remote server. Proxy stands for proxy server, and it acts as an intermediary between your device and the internet. VPN and proxy can both hide your IP address and location, but VPN offers more features and protection than proxy.

            -
            -
            \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/examples/clue1.1/data_preprocessing/wsc_preprocessing.py b/spaces/skf15963/summary/fengshen/examples/clue1.1/data_preprocessing/wsc_preprocessing.py deleted file mode 100644 index c9b5ec0a7625ac870e71e77cf8af6256e0a1609c..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/clue1.1/data_preprocessing/wsc_preprocessing.py +++ /dev/null @@ -1,81 +0,0 @@ -import json -from tqdm import tqdm -import os -import argparse - -label2desc={'true':'是','false':'不是'} - - -def load_data(file_path,is_training=False): - with open(file_path, 'r', encoding='utf8') as f: - lines = f.readlines() - result=[] - for line in tqdm(lines): - data = json.loads(line) - target = data['target'] - text=list(data['text']) - if target['span2_index'] LanguagePairDataset: - tokens = [[i] * l for i, l in enumerate(lengths)] - return LanguagePairDataset(ListDataset(tokens), lengths, mock_dict()) - - -def sample(id: int, length: int): - return {"id": id, "source": [id] * length, "target": None} - - -class TestDataset(unittest.TestCase): - def setUp(self): - logging.disable(logging.CRITICAL) - - def tearDown(self): - logging.disable(logging.NOTSET) - - def test_round_robin_zip_datasets(self): - long_dataset = lang_pair_dataset([10, 9, 8, 11]) - short_dataset = lang_pair_dataset([11, 9]) - - dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) - # Dataset is now sorted by sentence length - dataset.ordered_indices() - assert dataset.longest_dataset is long_dataset - self.assertEqual(dict(dataset[0]), {"a": sample(2, 8), "b": sample(1, 9)}) - # The item 2 of dataset 'a' is with item (2 % 2 = 0) of dataset 'b' - self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 9)}) - - def test_round_robin_zip_datasets_filtered(self): - long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12]) - short_dataset = lang_pair_dataset([11, 20, 9, 1000]) - - dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) - # Dataset is now sorted by sentence length - idx = dataset.ordered_indices() - idx, _ = dataset.filter_indices_by_size(idx, {"a": 19, "b": 900}) - self.assertEqual(list(idx), [0, 1, 2, 3, 4]) - self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)}) - self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 20)}) - self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(0, 11)}) - - def test_round_robin_zip_datasets_filtered_with_tuple(self): - long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12]) - short_dataset = lang_pair_dataset([11, 20, 9, 1000]) - - dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) - # Dataset is now sorted by sentence length - idx = dataset.ordered_indices() - idx, _ = dataset.filter_indices_by_size(idx, 19) - self.assertEqual(list(idx), [0, 1, 2, 3, 4]) - self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)}) - self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(2, 9)}) - self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(2, 9)}) diff --git a/spaces/sriramelango/Social_Classification_Public/models/__init__.py b/spaces/sriramelango/Social_Classification_Public/models/__init__.py deleted file mode 100644 index 5ca74d790a95a2b14d3fbb0cf9f0a9959416d305..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .ofa import 
OFAModel, ofa_base_architecture, ofa_large_architecture, ofa_huge_architecture \ No newline at end of file diff --git a/spaces/stevechong/cny-goodluck-detector/app.py b/spaces/stevechong/cny-goodluck-detector/app.py deleted file mode 100644 index a3e5af88a440cbc38da181c0624d91521719b503..0000000000000000000000000000000000000000 --- a/spaces/stevechong/cny-goodluck-detector/app.py +++ /dev/null @@ -1,153 +0,0 @@ -import cv2 -import numpy as np -from PIL import Image -import streamlit as st -import tensorflow as tf -from tensorflow.keras.models import load_model - -# most of this code has been obtained from Datature's prediction script -# https://github.com/datature/resources/blob/main/scripts/bounding_box/prediction.py - -st.set_option('deprecation.showfileUploaderEncoding', False) - -@st.cache(allow_output_mutation=True) -def load_model(): - return tf.saved_model.load('./saved_model') - -def load_label_map(label_map_path): - """ - Reads label map in the format of .pbtxt and parse into dictionary - Args: - label_map_path: the file path to the label_map - Returns: - dictionary with the format of {label_index: {'id': label_index, 'name': label_name}} - """ - label_map = {} - - with open(label_map_path, "r") as label_file: - for line in label_file: - if "id" in line: - label_index = int(line.split(":")[-1]) - label_name = next(label_file).split(":")[-1].strip().strip('"') - label_map[label_index] = {"id": label_index, "name": label_name} - return label_map - -def predict_class(image, model): - image = tf.cast(image, tf.float32) - image = tf.image.resize(image, [150, 150]) - image = np.expand_dims(image, axis = 0) - return model.predict(image) - -def plot_boxes_on_img(color_map, classes, bboxes, image_origi, origi_shape): - for idx, each_bbox in enumerate(bboxes): - color = color_map[classes[idx]] - - ## Draw bounding box - cv2.rectangle( - image_origi, - (int(each_bbox[1] * origi_shape[1]), - int(each_bbox[0] * origi_shape[0]),), - (int(each_bbox[3] * origi_shape[1]), - int(each_bbox[2] * origi_shape[0]),), - color, - 2, - ) - ## Draw label background - cv2.rectangle( - image_origi, - (int(each_bbox[1] * origi_shape[1]), - int(each_bbox[2] * origi_shape[0]),), - (int(each_bbox[3] * origi_shape[1]), - int(each_bbox[2] * origi_shape[0] + 15),), - color, - -1, - ) - ## Insert label class & score - cv2.putText( - image_origi, - "Class: {}, Score: {}".format( - str(category_index[classes[idx]]["name"]), - str(round(scores[idx], 2)), - ), - (int(each_bbox[1] * origi_shape[1]), - int(each_bbox[2] * origi_shape[0] + 10),), - cv2.FONT_HERSHEY_SIMPLEX, - 0.3, - (0, 0, 0), - 1, - cv2.LINE_AA, - ) - return image_origi - - -# Webpage code starts here - -#TODO change this -st.title('CNY Good Luck 福 Detector') -st.text('Made by Steve Chong') -st.markdown('## Upload an image with one or more 福 characters, count the number of good luck on the image!') -st.text('Credits to tutorial by @anebzt') - -with st.spinner('Model is being loaded...'): - model = load_model() - -# ask user to upload an image -file = st.file_uploader("Upload image", type=["jpg", "png"]) - -if file is None: - st.text('Waiting for upload...') -else: - st.text('Running inference...') - # open image - test_image = Image.open(file).convert("RGB") - origi_shape = np.asarray(test_image).shape - # resize image to default shape - default_shape = 320 - image_resized = np.array(test_image.resize((default_shape, default_shape))) - - ## Load color map - category_index = load_label_map("./label_map.pbtxt") - - # TODO Add more colors if there are more 
classes - # color of each label. check label_map.pbtxt to check the index for each class - color_map = { - 1: [0, 255, 0] # fu-goodluck -> green - } - - ## The model input needs to be a tensor - input_tensor = tf.convert_to_tensor(image_resized) - ## The model expects a batch of images, so add an axis with `tf.newaxis`. - input_tensor = input_tensor[tf.newaxis, ...] - - ## Feed image into model and obtain output - detections_output = model(input_tensor) - num_detections = int(detections_output.pop("num_detections")) - detections = {key: value[0, :num_detections].numpy() for key, value in detections_output.items()} - detections["num_detections"] = num_detections - - ## Filter out predictions below threshold - # if threshold is higher, there will be fewer predictions - # TODO change this number to see how the predictions change - confidence_threshold = 0.8 - indexes = np.where(detections["detection_scores"] > confidence_threshold) - - ## Extract predicted bounding boxes - bboxes = detections["detection_boxes"][indexes] - # there are no predicted boxes - if len(bboxes) == 0: - st.error('No boxes predicted') - # there are predicted boxes - else: - st.success('Boxes predicted') - classes = detections["detection_classes"][indexes].astype(int) - scores = detections["detection_scores"][indexes] - - # plot boxes and labels on image - image_origi = np.array(Image.fromarray(image_resized).resize((origi_shape[1], origi_shape[0]))) - image_origi = plot_boxes_on_img(color_map, classes, bboxes, image_origi, origi_shape) - - # show image in web page - st.image(Image.fromarray(image_origi), caption="Image with predictions", width=400) - st.markdown("### Predicted boxes") - for idx in range(len((bboxes))): - st.markdown(f"* Class: {str(category_index[classes[idx]]['name'])}, confidence score: {str(round(scores[idx], 2))}") \ No newline at end of file diff --git "a/spaces/suchun/chatGPT_acdemic/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" "b/spaces/suchun/chatGPT_acdemic/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" deleted file mode 100644 index 72ffe6b1a8f2a59a3c5c364e30dfb4949bd6a929..0000000000000000000000000000000000000000 --- "a/spaces/suchun/chatGPT_acdemic/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" +++ /dev/null @@ -1,67 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -fast_debug = False - - -def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, glob, os - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - - prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" - i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # 带超时倒计时 - - 
chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - if not fast_debug: time.sleep(2) - - all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) - i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt) # 带超时倒计时 - - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - - - -@CatchException -def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/spaces/sunmaiyyyy/combined-GI-RVC-model/app.py b/spaces/sunmaiyyyy/combined-GI-RVC-model/app.py deleted file mode 100644 index c92d11ac9dd05deac644d5372dd0f5f7f247475a..0000000000000000000000000000000000000000 --- a/spaces/sunmaiyyyy/combined-GI-RVC-model/app.py +++ /dev/null @@ -1,178 +0,0 @@ -import os -import glob -import json -import argparse -import traceback -import logging -import gradio as gr -import numpy as np -import librosa -import torch -import asyncio -import edge_tts -from datetime import datetime -from fairseq import checkpoint_utils -from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono -from vc_infer_pipeline import VC -from config import Config -config = Config() -logging.getLogger("numba").setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces - -def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index): - def vc_fn( - input_audio, - f0_up_key, - f0_method, - index_rate, - tts_mode, - tts_text, - tts_voice - ): - try: - if tts_mode: - if len(tts_text) > 100 and limitation: - return "Text is too long", None - if tts_text is None or tts_voice is None: - return "You need to enter text and select a voice", None - asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3")) - audio, sr = librosa.load("tts.mp3", sr=16000, mono=True) - else: - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if 
duration > 20 and limitation: - return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - times = [0, 0, 0] - f0_up_key = int(f0_up_key) - audio_opt = vc.pipeline( - hubert_model, - net_g, - 0, - audio, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - if_f0, - f0_file=None, - ) - print( - f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s" - ) - return (tgt_sr, audio_opt) - except: - info = traceback.format_exc() - print(info) - return info, (None, None) - return vc_fn - -def load_hubert(): - global hubert_model - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - -def change_to_tts_mode(tts_mode): - if tts_mode: - return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True) - else: - return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False) - -if __name__ == '__main__': - load_hubert() - models = [] - tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) - voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list] - with open("weights/model_info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for model_name, info in models_info.items(): - if not info['enable']: - continue - model_title = info['title'] - model_author = info.get("author", None) - model_cover = f"weights/{model_name}/{info['cover']}" - model_index = f"weights/{model_name}/{info['feature_retrieval_library']}" - cpt = torch.load(f"weights/{model_name}/{model_name}.pth", map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) - net_g.eval().to(config.device) - if config.is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - print(f"Model loaded: {model_name}") - models.append((model_name, model_title, model_author, model_cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, model_index))) - with gr.Blocks() as app: - gr.Markdown( - "#
            Combined Genshin Impact RVC Models\n" - "##
            The input audio should be clean and pure voice without background music.\n" - "###
            It is recommended to use google colab for more features. \n" - "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1Tgr6q9kKiB5P37rUitrB3CsNl8JP9iQZ?usp=sharing)\n\n" - "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)" - ) - with gr.Tabs(): - for (name, title, author, cover, vc_fn) in models: - with gr.TabItem(name): - with gr.Row(): - gr.Markdown( - '
            ' - f'
            {title}
            \n'+ - (f'
            Model author: {author}
            ' if author else "")+ - (f'' if cover else "")+ - '
            ' - ) - with gr.Row(): - with gr.Column(): - vc_input = gr.Audio(label="Input audio"+' (less than 20 seconds)' if limitation else '') - vc_transpose = gr.Number(label="Transpose", value=0, info='Type "12" to change from male to female voice. Type "-12" to change female to male voice') - vc_f0method = gr.Radio( - label="Pitch extraction algorithm", - choices=["pm", "harvest"], - value="pm", - interactive=True, - info="PM is fast but Harvest is better for low frequencies. (Default: PM)" - ) - vc_index_ratio = gr.Slider( - minimum=0, - maximum=1, - label="Retrieval feature ratio", - value=0.6, - interactive=True, - info="(Default: 0.6)" - ) - tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False) - tts_text = gr.Textbox(visible=False,label="TTS text (100 words limitation)" if limitation else "TTS text") - tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female") - vc_submit = gr.Button("Generate", variant="primary") - with gr.Column(): - vc_output = gr.Audio(label="Output Audio") - vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output]) - tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice]) - gr.Markdown('#
            Changelog 2023.05.30') - gr.Markdown('- Added kirara-jp, yelan-jp, zhongli-jp, alhaitham-jp, and barbara-jp') - gr.Markdown('- Minor fix and adjustment') - app.queue(concurrency_count=1, max_size=20, api_open=config.api).launch(share=config.colab) \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Minecraft Comes Alive Installer.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Minecraft Comes Alive Installer.md deleted file mode 100644 index c82d0b76c9ccc86f03f59311493cad9974ec8c7a..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Minecraft Comes Alive Installer.md +++ /dev/null @@ -1,110 +0,0 @@ - -

            Minecraft Comes Alive Installer: How to Download and Install the Mod

            - -

            If you are bored of the default villagers in Minecraft, you might want to try out the Minecraft Comes Alive mod. This mod replaces the villagers with human characters that have different personalities, skins, and abilities. You can interact with them, build relationships, get married, and even have children. In this article, we will show you how to download and install the Minecraft Comes Alive installer for your Minecraft version.

            -

            Minecraft Comes Alive Installer


Download File: https://cinurl.com/2uEXJi



            - -

            What is Minecraft Comes Alive?

            - -

            Minecraft Comes Alive (MCA) is a popular mod that adds a new dimension to the game. It makes the villagers more realistic and immersive, giving them names, genders, professions, and traits. You can talk to them, give them gifts, hire them, trade with them, and more. You can also find your soulmate among the villagers, propose to them, and start a family. Your children will grow up over time and help you with various chores. You can even marry other players and have children in multiplayer mode.

            - -

            The mod also adds some new features to the game, such as divorce papers, matchmaker's rings, baby boys and girls, tombstones, and more. You can customize the mod's settings to your liking, such as changing the villager skins, disabling aging, enabling infection, and so on. The mod is compatible with most other mods that do not overwrite the villagers.

            - -

            How to Download Minecraft Comes Alive Installer

            - -

            The first step to install the Minecraft Comes Alive mod is to download the installer from a reliable source. One of the best places to download the mod is CurseForge, a website that hosts thousands of mods for various games. You can find the Minecraft Comes Alive installer on this page: https://www.curseforge.com/minecraft/mc-mods/minecraft-comes-alive-mca.

            - -

            On this page, you will see a list of files for different versions of Minecraft. Choose the file that matches your Minecraft version and click on it. Then click on the download button to start downloading the installer. The file name should look something like this: minecraft-comes-alive-6.0.0-beta+1.12.2-universal.jar.

            - -

            How to Install Minecraft Comes Alive Installer

            - -

            Once you have downloaded the Minecraft Comes Alive installer, you need to run it to install the mod. Before you do that, make sure you have installed Minecraft Forge on your computer. Minecraft Forge is a tool that allows you to run mods on Minecraft. You can download it from here: https://files.minecraftforge.net/.

            - -

            After installing Minecraft Forge, follow these steps to install the Minecraft Comes Alive installer:

            -

            - -
1. Open the folder where you downloaded the Minecraft Comes Alive installer and double-click on it.
2. A window will pop up asking you to select an install client or server. Choose install client and click OK.
3. The installer will automatically detect your Minecraft folder and install the mod there.
4. When the installation is complete, click OK.
5. Launch Minecraft and select Forge as your profile.
6. Click on Mods and make sure Minecraft Comes Alive is enabled.
7. Enjoy the mod!
-

            Conclusion

            - -

            Minecraft Comes Alive is a mod that adds more life and fun to your Minecraft world. It lets you interact with human villagers, get married, have children, and more. To install the mod, you need to download and run the Minecraft Comes Alive installer from CurseForge and have Minecraft Forge installed on your computer. We hope this article helped you learn how to download and install the Minecraft Comes Alive installer for your Minecraft version.

            -

            How to Use Minecraft Comes Alive Installer

            - -

            After installing the Minecraft Comes Alive mod, you can start enjoying its features. To interact with a villager, you need to right-click on them. A menu will appear with different options, such as chat, follow, trade, gift, and more. You can also use special items to interact with villagers, such as the whistle, the divorce papers, the matchmaker's ring, and the baby boy or girl.

            - -

            As you interact with villagers, you will build relationships with them. You can see your relationship level with a villager by hovering over their name in the menu. The higher your relationship level, the more options you will have with them. For example, you can ask them to marry you if your relationship level is high enough. You can also have children with your spouse by using a baby boy or girl item.

            - -

            Your children will grow up over time and do different chores for you. You can assign them chores by right-clicking on them and choosing the chore option. You can also give them gifts, teach them skills, and tell them stories. Your children will eventually grow up into adults, who can get married and have children of their own.

            - -

            How to Uninstall Minecraft Comes Alive Installer

            - -

            If you want to uninstall the Minecraft Comes Alive mod, you need to follow these steps:

            - -
1. Close Minecraft and any other programs that use it.
2. Open the folder where you installed the Minecraft Comes Alive installer and delete it.
3. Open the folder where your Minecraft saves are located and delete the MCA folder.
4. Launch Minecraft and select Forge as your profile.
5. Click on Mods and disable or remove Minecraft Comes Alive.
6. Enjoy vanilla Minecraft again!
-

            Conclusion

            - -

            Minecraft Comes Alive is a mod that adds more life and fun to your Minecraft world. It lets you interact with human villagers, get married, have children, and more. To install the mod, you need to download and run the Minecraft Comes Alive installer from CurseForge and have Minecraft Forge installed on your computer. We hope this article helped you learn how to download, install, use, and uninstall the Minecraft Comes Alive installer for your Minecraft version.

            -

            How to Play Minecraft Comes Alive

            - -

            Once you have installed the Minecraft Comes Alive mod, you can start exploring its features. You will notice that the villagers are now human characters with different skins, genders, and personalities. You can interact with them by right-clicking on them and choosing an option from the menu. You can chat with them, compliment them, joke with them, flirt with them, and more. You can also give them gifts, such as flowers, diamonds, or cakes, to increase your relationship level with them.

            - -

            As you interact with villagers, you will see their hearts change color depending on their mood and attitude towards you. The hearts can be green, yellow, orange, red, or black. Green means they are friendly, yellow means they are neutral, orange means they are unfriendly, red means they are hostile, and black means they are dead. You can also see their name, profession, trait, and relationship level by hovering over their name in the menu.

            - -

            If you want to get married to a villager, you need to have a high relationship level with them and a matchmaker's ring. You can craft a matchmaker's ring with a diamond and a gold ingot. To propose to a villager, you need to right-click on them with the ring and choose the marry option. If they accept, you will see a message saying that you are now married. You can also marry other players in multiplayer mode by using the same method.

            - -

            How to Customize Minecraft Comes Alive

            - -

            The Minecraft Comes Alive mod has many settings that you can customize to your liking. You can access the settings by pressing the MCA key on your keyboard (default is V). A menu will appear with different tabs, such as General Settings, Villager Settings, Family Settings, World Settings, and Server Settings. You can change various options in each tab, such as enabling or disabling aging, infection, story progression, villager skins, and more.

            - -

            You can also customize your own character in the mod by using the editor screen. You can access the editor screen by pressing the MCA key on your keyboard (default is V) and choosing the editor option. A screen will appear with different options to change your appearance, such as your skin color, hair color, eye color, facial features, clothing style, and more. You can also change your name and gender in this screen.

            - -

            The mod also has some commands that you can use to customize your experience. You can use these commands by typing them in the chat window (default is T). Some of the commands are /mca help (to see a list of all commands), /mca reset (to reset all MCA data), /mca revive (to revive a dead villager), /mca divorce (to divorce your spouse), /mca sethome (to set your home location), and more.

            -
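For quick reference, here are the commands mentioned above exactly as you would type them in the chat window. The short descriptions are the ones given in this article; any additional commands or arguments are not covered here, so check /mca help in-game for the full, version-specific list.

/mca help      (see a list of all commands)
/mca reset     (reset all MCA data)
/mca revive    (revive a dead villager)
/mca divorce   (divorce your spouse)
/mca sethome   (set your home location)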

            How to Troubleshoot Minecraft Comes Alive Installer

            - -

            Sometimes, you may encounter some issues when installing or using the Minecraft Comes Alive mod. Here are some common problems and solutions that may help you troubleshoot the mod:

            - -
• If you get a message saying that the mod is missing dependencies, such as RadixCore or Minecraft Forge, you need to download and install them from their respective websites.
• If you get a message saying that the mod is incompatible with your Minecraft version, you need to download and install the correct version of the mod from CurseForge or our website.
• If you get a message saying that the mod has crashed or caused an error, you need to check the crash report and see what caused the problem. You can also send us the crash report via our website or Discord server for further assistance.
• If you have any other issues with the mod, such as glitches, bugs, or conflicts with other mods, you can report them on our website or Discord server. Please provide as much information as possible, such as screenshots, logs, and steps to reproduce the issue.
-

            How to Update Minecraft Comes Alive Installer

            - -

            The Minecraft Comes Alive mod is constantly being updated with new features, improvements, and bug fixes. To enjoy the latest version of the mod, you need to update it regularly. You can update the mod by following these steps:

            - -
1. Close Minecraft and any other programs that use it.
2. Open the folder where you downloaded the Minecraft Comes Alive installer and delete it.
3. Open CurseForge or our website and download the latest version of the installer.
4. Run the installer and follow the same steps as before to install the mod.
5. Launch Minecraft and select Forge as your profile.
6. Click on Mods and make sure Minecraft Comes Alive is enabled.
7. Enjoy the updated mod!
-

            Conclusion

            - -

            Minecraft Comes Alive is a mod that adds more life and fun to your Minecraft world. It lets you interact with human villagers, get married, have children, and more. To install the mod, you need to download and run the Minecraft Comes Alive installer from CurseForge and have Minecraft Forge installed on your computer. We hope this article helped you learn how to download, install, use, update, and troubleshoot the Minecraft Comes Alive installer for your Minecraft version.

            -

            -
            -
            \ No newline at end of file diff --git a/spaces/szk1ck/similarity_by_fasttext/README.md b/spaces/szk1ck/similarity_by_fasttext/README.md deleted file mode 100644 index c6a8be55092a4dbbbfd71f4366b1459bd7f67f48..0000000000000000000000000000000000000000 --- a/spaces/szk1ck/similarity_by_fasttext/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Similarity By Fasttext -emoji: 🏆 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/szukevin/VISOR-GPT/train/finetune/run_classifier.py b/spaces/szukevin/VISOR-GPT/train/finetune/run_classifier.py deleted file mode 100644 index 97504020581de7100ff1640cc56844b612149361..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/finetune/run_classifier.py +++ /dev/null @@ -1,366 +0,0 @@ -""" -This script provides an example to wrap TencentPretrain for classification. -""" -import sys -import os -import random -import argparse -import torch -import torch.nn as nn - -tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -sys.path.append(tencentpretrain_dir) - -from tencentpretrain.embeddings import * -from tencentpretrain.encoders import * -from tencentpretrain.utils.vocab import Vocab -from tencentpretrain.utils.constants import * -from tencentpretrain.utils import * -from tencentpretrain.utils.optimizers import * -from tencentpretrain.utils.config import load_hyperparam -from tencentpretrain.utils.seed import set_seed -from tencentpretrain.utils.logging import init_logger -from tencentpretrain.utils.misc import pooling -from tencentpretrain.model_saver import save_model -from tencentpretrain.opts import finetune_opts, tokenizer_opts, adv_opts - - -class Classifier(nn.Module): - def __init__(self, args): - super(Classifier, self).__init__() - self.embedding = Embedding(args) - for embedding_name in args.embedding: - tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab)) - self.embedding.update(tmp_emb, embedding_name) - self.encoder = str2encoder[args.encoder](args) - self.labels_num = args.labels_num - self.pooling_type = args.pooling - self.soft_targets = args.soft_targets - self.soft_alpha = args.soft_alpha - self.output_layer_1 = nn.Linear(args.hidden_size, args.hidden_size) - self.output_layer_2 = nn.Linear(args.hidden_size, self.labels_num) - - def forward(self, src, tgt, seg, soft_tgt=None): - """ - Args: - src: [batch_size x seq_length] - tgt: [batch_size] - seg: [batch_size x seq_length] - """ - # Embedding. - emb = self.embedding(src, seg) - # Encoder. - output = self.encoder(emb, seg) - # Target. 
- output = pooling(output, seg, self.pooling_type) - output = torch.tanh(self.output_layer_1(output)) - logits = self.output_layer_2(output) - if tgt is not None: - if self.soft_targets and soft_tgt is not None: - loss = self.soft_alpha * nn.MSELoss()(logits, soft_tgt) + \ - (1 - self.soft_alpha) * nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), tgt.view(-1)) - else: - loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), tgt.view(-1)) - return loss, logits - else: - return None, logits - - -def count_labels_num(path): - labels_set, columns = set(), {} - with open(path, mode="r", encoding="utf-8") as f: - for line_id, line in enumerate(f): - if line_id == 0: - for i, column_name in enumerate(line.rstrip("\r\n").split("\t")): - columns[column_name] = i - continue - line = line.rstrip("\r\n").split("\t") - label = int(line[columns["label"]]) - labels_set.add(label) - return len(labels_set) - - -def load_or_initialize_parameters(args, model): - if args.pretrained_model_path is not None: - # Initialize with pretrained model. - model.load_state_dict(torch.load(args.pretrained_model_path, map_location="cpu"), strict=False) - else: - # Initialize with normal distribution. - for n, p in list(model.named_parameters()): - if "gamma" not in n and "beta" not in n: - p.data.normal_(0, 0.02) - - -def build_optimizer(args, model): - param_optimizer = list(model.named_parameters()) - no_decay = ["bias", "gamma", "beta"] - optimizer_grouped_parameters = [ - {"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], "weight_decay": 0.01}, - {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, - ] - if args.optimizer in ["adamw"]: - optimizer = str2optimizer[args.optimizer](optimizer_grouped_parameters, lr=args.learning_rate, correct_bias=False) - else: - optimizer = str2optimizer[args.optimizer](optimizer_grouped_parameters, lr=args.learning_rate, - scale_parameter=False, relative_step=False) - if args.scheduler in ["constant"]: - scheduler = str2scheduler[args.scheduler](optimizer) - elif args.scheduler in ["constant_with_warmup"]: - scheduler = str2scheduler[args.scheduler](optimizer, args.train_steps*args.warmup) - else: - scheduler = str2scheduler[args.scheduler](optimizer, args.train_steps*args.warmup, args.train_steps) - return optimizer, scheduler - - -def batch_loader(batch_size, src, tgt, seg, soft_tgt=None): - instances_num = src.size()[0] - for i in range(instances_num // batch_size): - src_batch = src[i * batch_size : (i + 1) * batch_size, :] - tgt_batch = tgt[i * batch_size : (i + 1) * batch_size] - seg_batch = seg[i * batch_size : (i + 1) * batch_size, :] - if soft_tgt is not None: - soft_tgt_batch = soft_tgt[i * batch_size : (i + 1) * batch_size, :] - yield src_batch, tgt_batch, seg_batch, soft_tgt_batch - else: - yield src_batch, tgt_batch, seg_batch, None - if instances_num > instances_num // batch_size * batch_size: - src_batch = src[instances_num // batch_size * batch_size :, :] - tgt_batch = tgt[instances_num // batch_size * batch_size :] - seg_batch = seg[instances_num // batch_size * batch_size :, :] - if soft_tgt is not None: - soft_tgt_batch = soft_tgt[instances_num // batch_size * batch_size :, :] - yield src_batch, tgt_batch, seg_batch, soft_tgt_batch - else: - yield src_batch, tgt_batch, seg_batch, None - - -def read_dataset(args, path): - dataset, columns = [], {} - with open(path, mode="r", encoding="utf-8") as f: - for line_id, line in enumerate(f): - if line_id == 0: - for i, column_name in 
enumerate(line.rstrip("\r\n").split("\t")): - columns[column_name] = i - continue - line = line.rstrip("\r\n").split("\t") - tgt = int(line[columns["label"]]) - if args.soft_targets and "logits" in columns.keys(): - soft_tgt = [float(value) for value in line[columns["logits"]].split(" ")] - if "text_b" not in columns: # Sentence classification. - text_a = line[columns["text_a"]] - src = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN]) - seg = [1] * len(src) - else: # Sentence-pair classification. - text_a, text_b = line[columns["text_a"]], line[columns["text_b"]] - src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN]) - src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_b) + [SEP_TOKEN]) - src = src_a + src_b - seg = [1] * len(src_a) + [2] * len(src_b) - - if len(src) > args.seq_length: - src = src[: args.seq_length] - seg = seg[: args.seq_length] - PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0] - while len(src) < args.seq_length: - src.append(PAD_ID) - seg.append(0) - if args.soft_targets and "logits" in columns.keys(): - dataset.append((src, tgt, seg, soft_tgt)) - else: - dataset.append((src, tgt, seg)) - - return dataset - - -def train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch=None): - model.zero_grad() - - src_batch = src_batch.to(args.device) - tgt_batch = tgt_batch.to(args.device) - seg_batch = seg_batch.to(args.device) - if soft_tgt_batch is not None: - soft_tgt_batch = soft_tgt_batch.to(args.device) - - loss, _ = model(src_batch, tgt_batch, seg_batch, soft_tgt_batch) - if torch.cuda.device_count() > 1: - loss = torch.mean(loss) - - if args.fp16: - with args.amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - if args.use_adv and args.adv_type == "fgm": - args.adv_method.attack(epsilon=args.fgm_epsilon) - loss_adv, _ = model(src_batch, tgt_batch, seg_batch, soft_tgt_batch) - if torch.cuda.device_count() > 1: - loss_adv = torch.mean(loss_adv) - loss_adv.backward() - args.adv_method.restore() - - if args.use_adv and args.adv_type == "pgd": - K = args.pgd_k - args.adv_method.backup_grad() - for t in range(K): - # apply the perturbation to embedding - args.adv_method.attack(epsilon=args.pgd_epsilon, alpha=args.pgd_alpha, - is_first_attack=(t == 0)) - if t != K - 1: - model.zero_grad() - else: - args.adv_method.restore_grad() - loss_adv, _ = model(src_batch, tgt_batch, seg_batch, soft_tgt_batch) - if torch.cuda.device_count() > 1: - loss_adv = torch.mean(loss_adv) - loss_adv.backward() - args.adv_method.restore() - - optimizer.step() - scheduler.step() - - return loss - - -def evaluate(args, dataset): - src = torch.LongTensor([sample[0] for sample in dataset]) - tgt = torch.LongTensor([sample[1] for sample in dataset]) - seg = torch.LongTensor([sample[2] for sample in dataset]) - - batch_size = args.batch_size - - correct = 0 - # Confusion matrix. 
- confusion = torch.zeros(args.labels_num, args.labels_num, dtype=torch.long) - - args.model.eval() - - for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)): - src_batch = src_batch.to(args.device) - tgt_batch = tgt_batch.to(args.device) - seg_batch = seg_batch.to(args.device) - with torch.no_grad(): - _, logits = args.model(src_batch, tgt_batch, seg_batch) - pred = torch.argmax(nn.Softmax(dim=1)(logits), dim=1) - gold = tgt_batch - for j in range(pred.size()[0]): - confusion[pred[j], gold[j]] += 1 - correct += torch.sum(pred == gold).item() - - args.logger.info("Confusion matrix:") - args.logger.info(confusion) - args.logger.info("Report precision, recall, and f1:") - - eps = 1e-9 - for i in range(confusion.size()[0]): - p = confusion[i, i].item() / (confusion[i, :].sum().item() + eps) - r = confusion[i, i].item() / (confusion[:, i].sum().item() + eps) - f1 = 2 * p * r / (p + r + eps) - args.logger.info("Label {}: {:.3f}, {:.3f}, {:.3f}".format(i, p, r, f1)) - - args.logger.info("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(correct / len(dataset), correct, len(dataset))) - return correct / len(dataset), confusion - - -def main(): - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - finetune_opts(parser) - - tokenizer_opts(parser) - - parser.add_argument("--soft_targets", action='store_true', - help="Train model with logits.") - parser.add_argument("--soft_alpha", type=float, default=0.5, - help="Weight of the soft targets loss.") - - adv_opts(parser) - - args = parser.parse_args() - - # Load the hyperparameters from the config file. - args = load_hyperparam(args) - # Count the number of labels. - args.labels_num = count_labels_num(args.train_path) - - # Build tokenizer. - args.tokenizer = str2tokenizer[args.tokenizer](args) - set_seed(args.seed) - - # Build classification model. - model = Classifier(args) - - # Load or initialize parameters. - load_or_initialize_parameters(args, model) - - # Get logger. - args.logger = init_logger(args) - - args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - model = model.to(args.device) - - # Training phase. - trainset = read_dataset(args, args.train_path) - instances_num = len(trainset) - batch_size = args.batch_size - - args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1 - - args.logger.info("Batch size: {}".format(batch_size)) - args.logger.info("The number of training instances: {}".format(instances_num)) - optimizer, scheduler = build_optimizer(args, model) - - if args.fp16: - try: - from apex import amp - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) - args.amp = amp - - if torch.cuda.device_count() > 1: - args.logger.info("{} GPUs are available. 
Let's use them.".format(torch.cuda.device_count())) - model = torch.nn.DataParallel(model) - args.model = model - - if args.use_adv: - args.adv_method = str2adv[args.adv_type](model) - - total_loss, result, best_result = 0.0, 0.0, 0.0 - - args.logger.info("Start training.") - for epoch in range(1, args.epochs_num + 1): - random.shuffle(trainset) - src = torch.LongTensor([example[0] for example in trainset]) - tgt = torch.LongTensor([example[1] for example in trainset]) - seg = torch.LongTensor([example[2] for example in trainset]) - if args.soft_targets: - soft_tgt = torch.FloatTensor([example[3] for example in trainset]) - else: - soft_tgt = None - - model.train() - for i, (src_batch, tgt_batch, seg_batch, soft_tgt_batch) in enumerate(batch_loader(batch_size, src, tgt, seg, soft_tgt)): - loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch) - total_loss += loss.item() - if (i + 1) % args.report_steps == 0: - args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps)) - total_loss = 0.0 - - result = evaluate(args, read_dataset(args, args.dev_path)) - if result[0] > best_result: - best_result = result[0] - save_model(model, args.output_model_path) - - # Evaluation phase. - if args.test_path is not None: - args.logger.info("Test set evaluation.") - if torch.cuda.device_count() > 1: - args.model.module.load_state_dict(torch.load(args.output_model_path)) - else: - args.model.load_state_dict(torch.load(args.output_model_path)) - evaluate(args, read_dataset(args, args.test_path)) - - -if __name__ == "__main__": - main() diff --git a/spaces/szukevin/VISOR-GPT/train/scripts/preprocess_coord.py b/spaces/szukevin/VISOR-GPT/train/scripts/preprocess_coord.py deleted file mode 100644 index afe8c239a7fc6015cd26860ab592e6c4d5a1dd55..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/scripts/preprocess_coord.py +++ /dev/null @@ -1,249 +0,0 @@ -import argparse -import collections -import torch -import json -import numpy as np -import random -from interval import Interval -from tqdm import tqdm - -# [Annotation type] [Object centric or Multiple instances] [Number of instances] [Number of keypoints] [Class A, Class B, ...] [Box A, Box B, ...] 
- -# ----- kinhane -def get_size(coordinate, type, small=Interval(0, 32**2), medium=Interval(32**2, 96**2, lower_closed=False)): - - if type == 'box': - coordinate = np.array(coordinate) - mean_area = np.mean((coordinate[:, 2] - coordinate[:, 0]) * (coordinate[:, 3] - coordinate[:, 1])) - # import ipdb - # ipdb.set_trace() - elif type == 'keypoint' or type == 'mask': - area_list = [] - for coord in coordinate: - if type == 'mask': - coord = np.array(coord).squeeze(1) - else: - # delete unannotated key points - tmp = [] - for kpt in coord: - _, _, v = kpt - if v != 0: - tmp.append(kpt) - coord = np.array(tmp) - # import ipdb - # ipdb.set_trace() - area = (np.max(coord[:, 0]) - np.min(coord[:, 0])) * (np.max(coord[:, 1]) - np.min(coord[:, 1])) - area_list.append(area) - mean_area = np.mean(area_list) - else: - raise NotImplementedError - - if mean_area in small: - return 'small' - elif mean_area in medium: - return 'medium' - else: - return 'large' - -# ----- kinhane - -def filter_keypoint(keypoints): - output = [] - for kp_list in keypoints: - output_single = [] - for kp in kp_list: - for name, point in kp.items(): - if np.array(point).sum() > 0: - output_single.append({name: point}) - if len(output_single) > 0: - output.append(output_single) - return output - - -def keypoint_to_formular_data(keypoints): - output = [] - for kp_list in tqdm(keypoints): - random.shuffle(kp_list) - output_single = {"anno_type": "key point", - "prefix": "Multiple instances", - "flag": None, - "instances_num": 0, - "keypoints_num": None, - "categories": [], - "coordinate": [] - } - for kp in kp_list: - for name, point in kp.items(): - # ----- kinhane omit instances with less 3 key points - if np.where(np.array(point)[:, -1] != 0)[0].shape[0] < 3: - continue - # ----- kinhane - output_single["instances_num"] += 1 - output_single["categories"].append(name) - output_single["coordinate"].append(point) - output_single["keypoints_num"] = len(point) - - if output_single["instances_num"] > 7: - break - - # ----- kinhane omit idle list - if len(output_single["coordinate"]) == 0: - continue - # ----- kinhane - - if random.random() < 0.5: - flag = get_size(output_single["coordinate"], type='keypoint') # add by kinhane - else: - flag = "random" - output_single["flag"] = flag # add by kinhane - output.append(output_single) - - return output - - -def mask_to_formular_data(keypoints): - output = [] - for mask_list in tqdm(keypoints): - point_counter = 0 - - random.shuffle(keypoints) - output_single = {"anno_type": "mask", - "prefix": "Multiple instances", - "flag": None, - "instances_num": 0, - "keypoints_num": 0, - "categories": [], - "coordinate": [] - } - for mask in mask_list: - for name, point in mask.items(): - # ----- kinhane omit very small masks - if len(point) < 5: - continue - if point_counter + len(point) >= 150: - break - else: - point_counter += len(point) - - # ----- kinhane - output_single["categories"].append(name) - output_single["coordinate"].append(point) - output_single["instances_num"] += 1 - - - - # ----- kinhane omit idle list - if len(output_single["coordinate"]) == 0: - continue - # ----- kinhane - - flag = get_size(output_single["coordinate"], type='mask') # add by kinhane - output_single["flag"] = flag # add by kinhane - output.append(output_single) - - return output - - -def box_to_formular_data(keypoints, centric=0): - output = [] - for mask_list in tqdm(keypoints): - random.shuffle(mask_list) - output_single = {"anno_type": "box", - "prefix": "multiple instances", - "flag": None, - 
"instances_num": 0, - "keypoints_num": 0, - "categories": [], - "coordinate": [] - } - if centric == 1: - output_single["prefix"] = "object centric" - for mask in mask_list[:20]: - for name, point in mask.items(): - output_single["categories"].append(name) - output_single["coordinate"].append(point) - output_single["instances_num"] += 1 - - flag = get_size(output_single["coordinate"], type='box') # add by kinhane - output_single["flag"] = flag # add by kinhane - output.append(output_single) - - return output - -num2char = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j', - 10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r', 18: 's', 19: 't', - 20: 'u', 21: 'v', 22: 'w'} - -def formular_data_to_str(data_list, type): - - def keyporint_coord_to_str(keypoints): - output = "" - for points_list in keypoints: - output = output + '[' - for i, point in enumerate(points_list): - output = output + ' ' + num2char[i] + ' $' + str(point[0]) + ' $'+ str(point[1]) - output = output + '] ' - return output - - def mask_coord_to_str(keypoints): - output = "" - for points_list in keypoints: - output = output + '[' - for i, point in enumerate(points_list): - output = output + ' ' + 'm'+str(i) + ' $' + str(point[0][0]) + ' $'+ str(point[0][1]) - output = output + '] ' - return output - - def box_coord_to_str(boxes): - output = "" - for box in boxes: - output = output + '[ xmin $' + str(box[0]) + ' ymin $'+ str(box[1]) + \ - ' xmax $'+ str(box[2]) + ' ymax $'+ str(box[3]) +'] ' - return output - - output = [] - for data in tqdm(data_list): - output_single = '; '.join([data["anno_type"], data["prefix"], str(data["instances_num"]), str(data["keypoints_num"]), data['flag']]) - output_single = output_single + '; ' + ', '.join(data["categories"]) +'; ' - if type == "keypoint": - output_single = output_single + keyporint_coord_to_str(data["coordinate"]) - elif type == "box": - output_single = output_single + box_coord_to_str(data["coordinate"]) - else: - output_single = output_single + mask_coord_to_str(data["coordinate"]) - output.append(output_single) - - return output - -def main(): - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("--input_path", type=str, default='data/xjh_coco_val.json', help="data/xjh_coco_val.json") - parser.add_argument("--output_path", type=str, default='test.txt', help="test.txt") - parser.add_argument("--data_type", type=str, default='mask', help="box") - parser.add_argument("--centric", type=int, default=0, help="box") - - args = parser.parse_args() - - with open(args.input_path) as f: - data = json.load(f) - - print("data_type ", args.data_type) - - - if args.data_type == "keypoint": - keypoints = filter_keypoint(data['keypoints'] ) - data_json = keypoint_to_formular_data(keypoints) - elif args.data_type == "box": - data_json = box_to_formular_data(data['bboxes'], args.centric) - else: - data_json = mask_to_formular_data(data['masks']) - - data_str = formular_data_to_str(data_json, args.data_type) - - with open(args.output_path, 'w') as f: - for l in data_str: - f.write(l + '\n') - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/taesiri/DeticChatGPT/detic/modeling/meta_arch/custom_rcnn.py b/spaces/taesiri/DeticChatGPT/detic/modeling/meta_arch/custom_rcnn.py deleted file mode 100644 index 9a5ac721d42e40a8b4f28508b10a932cef827fcf..0000000000000000000000000000000000000000 --- 
a/spaces/taesiri/DeticChatGPT/detic/modeling/meta_arch/custom_rcnn.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import logging -import numpy as np -from typing import Dict, List, Optional, Tuple -import torch -from torch import nn -import json -from detectron2.utils.events import get_event_storage -from detectron2.config import configurable -from detectron2.structures import ImageList, Instances, Boxes -import detectron2.utils.comm as comm - -from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY -from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN -from detectron2.modeling.postprocessing import detector_postprocess -from detectron2.utils.visualizer import Visualizer, _create_text_labels -from detectron2.data.detection_utils import convert_image_to_rgb - -from torch.cuda.amp import autocast -from ..text.text_encoder import build_text_encoder -from ..utils import load_class_freq, get_fed_loss_inds - -@META_ARCH_REGISTRY.register() -class CustomRCNN(GeneralizedRCNN): - ''' - Add image labels - ''' - @configurable - def __init__( - self, - with_image_labels = False, - dataset_loss_weight = [], - fp16 = False, - sync_caption_batch = False, - roi_head_name = '', - cap_batch_ratio = 4, - with_caption = False, - dynamic_classifier = False, - **kwargs): - """ - """ - self.with_image_labels = with_image_labels - self.dataset_loss_weight = dataset_loss_weight - self.fp16 = fp16 - self.with_caption = with_caption - self.sync_caption_batch = sync_caption_batch - self.roi_head_name = roi_head_name - self.cap_batch_ratio = cap_batch_ratio - self.dynamic_classifier = dynamic_classifier - self.return_proposal = False - if self.dynamic_classifier: - self.freq_weight = kwargs.pop('freq_weight') - self.num_classes = kwargs.pop('num_classes') - self.num_sample_cats = kwargs.pop('num_sample_cats') - super().__init__(**kwargs) - assert self.proposal_generator is not None - if self.with_caption: - assert not self.dynamic_classifier - self.text_encoder = build_text_encoder(pretrain=True) - for v in self.text_encoder.parameters(): - v.requires_grad = False - - - @classmethod - def from_config(cls, cfg): - ret = super().from_config(cfg) - ret.update({ - 'with_image_labels': cfg.WITH_IMAGE_LABELS, - 'dataset_loss_weight': cfg.MODEL.DATASET_LOSS_WEIGHT, - 'fp16': cfg.FP16, - 'with_caption': cfg.MODEL.WITH_CAPTION, - 'sync_caption_batch': cfg.MODEL.SYNC_CAPTION_BATCH, - 'dynamic_classifier': cfg.MODEL.DYNAMIC_CLASSIFIER, - 'roi_head_name': cfg.MODEL.ROI_HEADS.NAME, - 'cap_batch_ratio': cfg.MODEL.CAP_BATCH_RATIO, - }) - if ret['dynamic_classifier']: - ret['freq_weight'] = load_class_freq( - cfg.MODEL.ROI_BOX_HEAD.CAT_FREQ_PATH, - cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT) - ret['num_classes'] = cfg.MODEL.ROI_HEADS.NUM_CLASSES - ret['num_sample_cats'] = cfg.MODEL.NUM_SAMPLE_CATS - return ret - - - def inference( - self, - batched_inputs: Tuple[Dict[str, torch.Tensor]], - detected_instances: Optional[List[Instances]] = None, - do_postprocess: bool = True, - ): - assert not self.training - assert detected_instances is None - - images = self.preprocess_image(batched_inputs) - features = self.backbone(images.tensor) - proposals, _ = self.proposal_generator(images, features, None) - results, _ = self.roi_heads(images, features, proposals) - if do_postprocess: - assert not torch.jit.is_scripting(), \ - "Scripting is not supported for postprocess." 
- return CustomRCNN._postprocess( - results, batched_inputs, images.image_sizes) - else: - return results - - - def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): - """ - Add ann_type - Ignore proposal loss when training with image labels - """ - if not self.training: - return self.inference(batched_inputs) - - images = self.preprocess_image(batched_inputs) - - ann_type = 'box' - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - if self.with_image_labels: - for inst, x in zip(gt_instances, batched_inputs): - inst._ann_type = x['ann_type'] - inst._pos_category_ids = x['pos_category_ids'] - ann_types = [x['ann_type'] for x in batched_inputs] - assert len(set(ann_types)) == 1 - ann_type = ann_types[0] - if ann_type in ['prop', 'proptag']: - for t in gt_instances: - t.gt_classes *= 0 - - if self.fp16: # TODO (zhouxy): improve - with autocast(): - features = self.backbone(images.tensor.half()) - features = {k: v.float() for k, v in features.items()} - else: - features = self.backbone(images.tensor) - - cls_features, cls_inds, caption_features = None, None, None - - if self.with_caption and 'caption' in ann_type: - inds = [torch.randint(len(x['captions']), (1,))[0].item() \ - for x in batched_inputs] - caps = [x['captions'][ind] for ind, x in zip(inds, batched_inputs)] - caption_features = self.text_encoder(caps).float() - if self.sync_caption_batch: - caption_features = self._sync_caption_features( - caption_features, ann_type, len(batched_inputs)) - - if self.dynamic_classifier and ann_type != 'caption': - cls_inds = self._sample_cls_inds(gt_instances, ann_type) # inds, inv_inds - ind_with_bg = cls_inds[0].tolist() + [-1] - cls_features = self.roi_heads.box_predictor[ - 0].cls_score.zs_weight[:, ind_with_bg].permute(1, 0).contiguous() - - classifier_info = cls_features, cls_inds, caption_features - proposals, proposal_losses = self.proposal_generator( - images, features, gt_instances) - - if self.roi_head_name in ['StandardROIHeads', 'CascadeROIHeads']: - proposals, detector_losses = self.roi_heads( - images, features, proposals, gt_instances) - else: - proposals, detector_losses = self.roi_heads( - images, features, proposals, gt_instances, - ann_type=ann_type, classifier_info=classifier_info) - - if self.vis_period > 0: - storage = get_event_storage() - if storage.iter % self.vis_period == 0: - self.visualize_training(batched_inputs, proposals) - - losses = {} - losses.update(detector_losses) - if self.with_image_labels: - if ann_type in ['box', 'prop', 'proptag']: - losses.update(proposal_losses) - else: # ignore proposal loss for non-bbox data - losses.update({k: v * 0 for k, v in proposal_losses.items()}) - else: - losses.update(proposal_losses) - if len(self.dataset_loss_weight) > 0: - dataset_sources = [x['dataset_source'] for x in batched_inputs] - assert len(set(dataset_sources)) == 1 - dataset_source = dataset_sources[0] - for k in losses: - losses[k] *= self.dataset_loss_weight[dataset_source] - - if self.return_proposal: - return proposals, losses - else: - return losses - - - def _sync_caption_features(self, caption_features, ann_type, BS): - has_caption_feature = (caption_features is not None) - BS = (BS * self.cap_batch_ratio) if (ann_type == 'box') else BS - rank = torch.full( - (BS, 1), comm.get_rank(), dtype=torch.float32, - device=self.device) - if not has_caption_feature: - caption_features = rank.new_zeros((BS, 512)) - caption_features = torch.cat([caption_features, rank], dim=1) - global_caption_features = 
comm.all_gather(caption_features) - caption_features = torch.cat( - [x.to(self.device) for x in global_caption_features], dim=0) \ - if has_caption_feature else None # (NB) x (D + 1) - return caption_features - - - def _sample_cls_inds(self, gt_instances, ann_type='box'): - if ann_type == 'box': - gt_classes = torch.cat( - [x.gt_classes for x in gt_instances]) - C = len(self.freq_weight) - freq_weight = self.freq_weight - else: - gt_classes = torch.cat( - [torch.tensor( - x._pos_category_ids, - dtype=torch.long, device=x.gt_classes.device) \ - for x in gt_instances]) - C = self.num_classes - freq_weight = None - assert gt_classes.max() < C, '{} {}'.format(gt_classes.max(), C) - inds = get_fed_loss_inds( - gt_classes, self.num_sample_cats, C, - weight=freq_weight) - cls_id_map = gt_classes.new_full( - (self.num_classes + 1,), len(inds)) - cls_id_map[inds] = torch.arange(len(inds), device=cls_id_map.device) - return inds, cls_id_map \ No newline at end of file diff --git a/spaces/tang155/bingo/src/components/tailwind-indicator.tsx b/spaces/tang155/bingo/src/components/tailwind-indicator.tsx deleted file mode 100644 index f2a1291213dd67055fcebe67fab574c8441338df..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/src/components/tailwind-indicator.tsx +++ /dev/null @@ -1,14 +0,0 @@ -export function TailwindIndicator() { - if (process.env.NODE_ENV === 'production') return null - - return ( -
            -
            xs
            -
            sm
            -
            md
            -
            lg
            -
            xl
            -
            2xl
            -
            - ) -} diff --git a/spaces/tbvl/Fake_Face_Detection/face_cropper.py b/spaces/tbvl/Fake_Face_Detection/face_cropper.py deleted file mode 100644 index 7538c0ede73eda6218e629f6930d52494b243517..0000000000000000000000000000000000000000 --- a/spaces/tbvl/Fake_Face_Detection/face_cropper.py +++ /dev/null @@ -1,103 +0,0 @@ -import cv2 -import mediapipe as mp -import os -from gradio_client import Client -# from test_image_fusion import Test -# from test_image_fusion import Test -from test_image import Test -import numpy as np - - - -from PIL import Image -import numpy as np -import cv2 - -# client = Client("https://tbvl-real-and-fake-face-detection.hf.space/--replicas/40d41jxhhx/") - -data = 'faceswap' -dct = 'fft' - - -# testet = Test(model_paths = [f"weights/{data}-hh-best_model.pth", -# f"weights/{data}-fft-best_model.pth"], -# multi_modal = ['hh', 'fft']) - -testet = Test(model_path =f"weights/{data}-hh-best_model.pth", - multi_modal ='hh') - -# Initialize MediaPipe Face Detection -mp_face_detection = mp.solutions.face_detection -mp_drawing = mp.solutions.drawing_utils -face_detection = mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence=0.35) - -# Create a directory to save the cropped face images if it does not exist -save_dir = "cropped_faces" -os.makedirs(save_dir, exist_ok=True) - -# def detect_and_label_faces(image_path): - - -# Function to crop faces from a video and save them as images -# def crop_faces_from_video(video_path): -# # Read the video -# cap = cv2.VideoCapture(video_path) -# frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) -# frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) -# fps = int(cap.get(cv2.CAP_PROP_FPS)) -# total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - -# # Define the codec and create VideoWriter object -# out = cv2.VideoWriter(f'output_{real}_{data}_fusion.avi', cv2.VideoWriter_fourcc('M','J','P','G'), fps, (frame_width, frame_height)) - -# if not cap.isOpened(): -# print("Error: Could not open video.") -# return -# Convert PIL Image to NumPy array for OpenCV -def pil_to_opencv(pil_image): - open_cv_image = np.array(pil_image) - # Convert RGB to BGR for OpenCV - open_cv_image = open_cv_image[:, :, ::-1].copy() - return open_cv_image - -# Convert OpenCV NumPy array to PIL Image -def opencv_to_pil(opencv_image): - # Convert BGR to RGB - pil_image = Image.fromarray(opencv_image[:, :, ::-1]) - return pil_image - - - - -def detect_and_label_faces(frame): - frame = pil_to_opencv(frame) - - - print(type(frame)) - # Convert the frame to RGB - frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - # Perform face detection - results = face_detection.process(frame_rgb) - - # If faces are detected, crop and save each face as an image - if results.detections: - for face_count,detection in enumerate(results.detections): - bboxC = detection.location_data.relative_bounding_box - ih, iw, _ = frame.shape - x, y, w, h = int(bboxC.xmin * iw), int(bboxC.ymin * ih), int(bboxC.width * iw), int(bboxC.height * ih) - # Crop the face region and make sure the bounding box is within the frame dimensions - crop_img = frame[max(0, y):min(ih, y+h), max(0, x):min(iw, x+w)] - if crop_img.size > 0: - face_filename = os.path.join(save_dir, f'face_{face_count}.jpg') - cv2.imwrite(face_filename, crop_img) - - label = testet.testimage(face_filename) - - if os.path.exists(face_filename): - os.remove(face_filename) - - color = (0, 0, 255) if label == 'fake' else (0, 255, 0) - cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2) - cv2.putText(frame, 
label, (x, y + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2) - return opencv_to_pil(frame) - diff --git a/spaces/terfces0erbo/CollegeProjectV2/Connectify Hotspot Max Lifetime Crack.md b/spaces/terfces0erbo/CollegeProjectV2/Connectify Hotspot Max Lifetime Crack.md deleted file mode 100644 index 923199327a7acc3ec316aacf09f958f40a337246..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Connectify Hotspot Max Lifetime Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

            connectify hotspot max lifetime crack


            Download ⚹⚹⚹ https://bytlly.com/2uGlPN



            -
-Connectify Hotspot Pro 2020 Crack + Serial Key Free Download [Latest] ... The Pro and Max versions of the app Connectify Hotspot Pro Crack ... Enjoy using Connectify Hotspot Pro Free Download Full Version for lifetime.
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/FSXPMDG747QOTSIICrackCrack.md b/spaces/terfces0erbo/CollegeProjectV2/FSXPMDG747QOTSIICrackCrack.md deleted file mode 100644 index a897e6ae55d73271d023d5c251156e0fe082a178..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/FSXPMDG747QOTSIICrackCrack.md +++ /dev/null @@ -1,18 +0,0 @@ -

            FSXPMDG747QOTSIICrackCrack


            DOWNLOAD ››››› https://bytlly.com/2uGjVO



            - -pde for Boeing 757, .pde for Boeing 777, and .pde for Boeing 747-400, this simulation represents an invaluable, realistic experience of the Boeing 747-400. - -Each feature set that has been developed for the Boeing 757, Boeing 777 and Boeing 747-400 is simulated separately, ensuring that the aircraft behaves correctly. .pde for the Boeing 747-400 also has integrated .pde for the interior and exterior of the aircraft.  - -The Boeing 747-400 is a successful business jet aircraft, used to intercontinental routes. The Boeing 747-400 entered service in 2004. The aircraft was first ordered by a Chinese company, and is used by many other airlines and ground-based operators. - -The cockpit of the Boeing 747-400 includes independent displays for the flight director, attitude director, and navigation display. In addition, the Boeing 747-400 has a .pde for the cabin and galley. The .pde for the galley includes .pde for the .pde for the First Class, Economy, and Business Class cabins. The crew cabin includes sleeping areas, a pressurized toilet, and a galley. The galley has a microwave, refrigerator, and sink with hot water. .pde for the Boeing 747-400 is also used to simulate .pde for the .pde for the .pde for the .pde for the Boeing 747-400 exterior.  - -Receiving a message on their display screen is a common activity for flight crew during a flight. The Boeing 747-400 has the .pde for each of these messages. Each message is accompanied by a .pde for the arrival at a specific destination. The Boeing 747-400 has the ability to approach the approach to a runway.  - -The features of the Boeing 747-400 include .pde for engine failures, engine spares, fuel usage, and many other features that will appear in future simulations of the Boeing 747-400. These include .pde for the .pde for the .pde for the .pde for the .pde for the Boeing 747-400. - -This simulation includes the new automatic engine restart (AER) functionality. This feature enables a flight crew to restart an .pde for an engine which has failed by the aircraft control and maintenance system (CMS). This 4fefd39f24
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (siberian Mouse Video Collection 30 G).md b/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (siberian Mouse Video Collection 30 G).md deleted file mode 100644 index b4c8694e65c040a940b6ec86830fe8fcec282d3a..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (siberian Mouse Video Collection 30 G).md +++ /dev/null @@ -1,6 +0,0 @@ -

            HD Online Player (siberian mouse video collection 30 g)


DOWNLOAD: https://bytlly.com/2uGiPj



-Tattoodo is the world's #1 tattoo community with the greatest collection of tattoos ... Explore the world of LEGO® through games, videos, products and more! ... would then help Hasbro come up with the overall G. Face-Off: "Last Dance" vs. ... Comics Price Guide was the first online price guide for comic books back in 1995.
            -
            -
            -

            diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Forex Gump EA Cracked How to Trade Like a Pro with the Most Powerful Forex Robot.md b/spaces/tialenAdioni/chat-gpt-api/logs/Forex Gump EA Cracked How to Trade Like a Pro with the Most Powerful Forex Robot.md deleted file mode 100644 index 43872b765e0260eb5c0f046defa1014a0eeb74a2..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Forex Gump EA Cracked How to Trade Like a Pro with the Most Powerful Forex Robot.md +++ /dev/null @@ -1,46 +0,0 @@ -
            -

            Forex Gump EA Cracked: How to Download and Install the Best Forex Robot for Free

            -

            If you are looking for a way to make money from the forex market without spending a fortune on expensive software, you might have heard of Forex Gump EA. This is a popular forex robot that claims to generate consistent profits with low risk and high accuracy. But what if you don't want to pay the hefty price tag of $199 for the original version? Is there a way to get Forex Gump EA cracked for free?

            -

            The answer is yes, but you have to be careful. There are many websites that offer Forex Gump EA cracked versions for download, but most of them are scams or viruses that can harm your computer or steal your personal information. Some of them might even install malware that can hijack your trading account and make unauthorized trades with your money. That's why you should never download Forex Gump EA cracked from untrusted sources.

            -

            forex gump ea cracked


            Download File ===> https://urlcod.com/2uKaZd



            -

            However, there is one website that we have tested and verified to be safe and reliable. It is called Forex Gump EA Cracked, and it offers a fully functional and updated version of Forex Gump EA for free. You can download it from their official website and install it on your MT4 platform in minutes. You don't need any license key or activation code, and you can use it on any broker or account type.

            -

            Forex Gump EA Cracked has all the features and benefits of the original version, such as:

            -
• Trading on multiple currency pairs and timeframes
• Using advanced indicators and algorithms to analyze the market
• Opening and closing trades automatically with optimal entry and exit points
• Adjusting the risk level and trading parameters according to your preferences
• Protecting your account from large drawdowns and losses
• Generating detailed reports and statistics of your trading performance
-

            With Forex Gump EA Cracked, you can enjoy the same results as the original version, but without paying a dime. You can expect to make up to 50% profit per month with minimal effort and time. You can also join their community of traders who share their tips and experiences with Forex Gump EA Cracked.

            -

            If you want to download Forex Gump EA Cracked for free, all you have to do is visit their website and follow the instructions. You will need to provide your name and email address to get access to the download link. You will also receive updates and support from their team in case you have any questions or issues.

            -

            Don't miss this opportunity to get Forex Gump EA Cracked for free and start making money from the forex market today. Visit Forex Gump EA Cracked now and download your copy before it's too late.

            - -

            How Does Forex Gump EA Work?

            -

            Forex Gump EA is a fully automated forex trading system that uses a combination of technical and fundamental analysis to identify the best trading opportunities. It scans the market for trends, patterns, news events, and other factors that affect the price movements of the currency pairs. It then opens and closes trades based on its own logic and rules, without any human intervention.

            -

            -

            Forex Gump EA has a built-in money management system that ensures your account is always protected from large losses. It uses a fixed lot size and a stop loss for every trade, and it also has a trailing stop and a break-even feature that lock in your profits and minimize your risk. Forex Gump EA can trade on any market condition, whether it is trending, ranging, or volatile. It can adapt to the changing market environment and adjust its strategy accordingly.

            -

            Forex Gump EA is compatible with any MT4 broker and any account type, including ECN, STP, NDD, or micro accounts. It can run on any computer or VPS with a stable internet connection. It only requires a minimum deposit of $100 to start trading, but it is recommended to use a higher amount for better results. Forex Gump EA can trade on multiple currency pairs and timeframes simultaneously, but the most profitable ones are EURUSD, GBPUSD, USDJPY, and EURJPY on the M1 or M5 charts.

            - -

            What Are the Advantages of Forex Gump EA Cracked?

            -

            Forex Gump EA Cracked is a free version of Forex Gump EA that has been cracked by a team of hackers who wanted to share this amazing forex robot with the public. They have managed to bypass the security system of the original version and make it available for anyone to download and use without paying anything.

            -

            Forex Gump EA Cracked has all the advantages of Forex Gump EA, such as:

            -
              -
            • High profitability and low risk
            • -
            • Easy installation and setup
            • -
            • No license key or activation code required
            • -
            • No expiration date or limitations
            • -
            • Free updates and support
            • -
            -

            Forex Gump EA Cracked also has some additional advantages over the original version, such as:

            -
              -
            • No need to register or verify your account
            • -
            • No need to worry about losing your license or getting banned
            • -
            • No need to deal with annoying pop-ups or ads
            • -
            • No need to share your personal or financial information
            • -
            • No risk of getting scammed or infected by malware
            • -
            -

            Forex Gump EA Cracked is the best way to get Forex Gump EA for free and enjoy its benefits without any hassle or cost. You can download it from their official website and start using it right away.

            ddb901b051
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/GC Agarwal Physics Book Free Download A Comprehensive Guide for JEE Aspirants.md b/spaces/tialenAdioni/chat-gpt-api/logs/GC Agarwal Physics Book Free Download A Comprehensive Guide for JEE Aspirants.md deleted file mode 100644 index 44b68426ad4ef810180c568bc377a6ee7e8a4484..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/GC Agarwal Physics Book Free Download A Comprehensive Guide for JEE Aspirants.md +++ /dev/null @@ -1,156 +0,0 @@ - -

            GC Agarwal Physics Book Free Download: A Complete Guide

            -

            Are you looking for a reliable and comprehensive physics book for your preparation for competitive exams like JEE Main, JEE Advanced, NEET, etc.? If yes, then you might have heard of GC Agarwal Physics Book, one of the most popular and widely used physics books among students. But what is GC Agarwal Physics Book and how can you download it for free? In this article, we will answer these questions and more. We will also discuss the benefits and drawbacks of GC Agarwal Physics Book, as well as some alternatives that you can consider. So, without further ado, let's get started.

            -

            gc agarwal physics book free download


            DOWNLOADhttps://urlcod.com/2uK88l



            -

            Introduction

            -

            What is GC Agarwal Physics Book?

            -

            GC Agarwal Physics Book is a series of physics books written by Dr. G.C. Agarwal, a renowned physicist and educator. The series consists of six volumes, covering the topics of mechanics, waves and thermodynamics, electricity and magnetism, optics and modern physics, atomic and nuclear physics, and solid state physics. The books are designed to help students master the concepts and principles of physics and apply them to solve various problems.

            -

            Why is GC Agarwal Physics Book popular among students?

            -

            GC Agarwal Physics Book is popular among students because it offers several advantages over other physics books. Some of these advantages are:

            -
              -
            • It covers the entire syllabus of physics for competitive exams like JEE Main, JEE Advanced, NEET, etc.
            • -
            • It provides a systematic and logical presentation of topics, starting from the basics and gradually moving to the advanced levels.
            • -
            • It explains the concepts and theories in a simple and lucid language, with the help of diagrams, graphs, tables, etc.
            • -
            • It includes a large number of solved examples and exercises, with varying levels of difficulty and types of questions.
            • -
            • It gives tips and tricks for solving problems quickly and accurately.
            • -
            -

            How to download GC Agarwal Physics Book for free?

            -

            If you want to download GC Agarwal Physics Book for free, you have two options. One option is to search for the PDF files of the books on the internet. There are many websites that offer free downloads of GC Agarwal Physics Book PDFs. However, you should be careful while downloading from such websites, as they may contain viruses or malware that can harm your device or data. Also, some of the PDF files may be incomplete or corrupted.

            -

            gc agarwal physics book pdf download
            -gc agarwal physics book for class 11 free download
            -gc agarwal physics book for class 12 free download
            -gc agarwal physics book solutions free download
            -gc agarwal physics book for neet free download
            -gc agarwal physics book for jee free download
            -gc agarwal physics book for iit free download
            -gc agarwal physics book for cbse free download
            -gc agarwal physics book for isc free download
            -gc agarwal physics book for icse free download
            -gc agarwal physics book for ncert free download
            -gc agarwal physics book for olympiad free download
            -gc agarwal physics book for aiims free download
            -gc agarwal physics book for bitsat free download
            -gc agarwal physics book for kvpy free download
            -gc agarwal physics book for ntse free download
            -gc agarwal physics book for eamcet free download
            -gc agarwal physics book for wbjee free download
            -gc agarwal physics book for comedk free download
            -gc agarwal physics book for viteee free download
            -gc agarwal physics book for upsee free download
            -gc agarwal physics book for mht cet free download
            -gc agarwal physics book for gujcet free download
            -gc agarwal physics book for kcet free download
            -gc agarwal physics book for ap eamcet free download
            -gc agarwal physics book for ts eamcet free download
            -gc agarwal physics book for jee main free download
            -gc agarwal physics book for jee advanced free download
            -gc agarwal physics book for jee mains and advanced free download
            -gc agarwal physics book concepts of physics free download
            -gc agarwal physics book fundamentals of physics free download
            -gc agarwal physics book principles of physics free download
            -gc agarwal physics book problems in general physics free download
            -gc agarwal physics book objective questions in physics free download
            -gc agarwal physics book numerical problems in physics free download
            -gc agarwal physics book multiple choice questions in physics free download
            -gc agarwal physics book theory and practice of physics free download
            -how to get gc agarwal physics book for free online
            -where to find gc agarwal physics book for free online
            -best website to download gc agarwal physics book for free online
            -best app to download gc agarwal physics book for free online
            -best torrent to download gc agarwal physics book for free online
            -best link to download gc agarwal physics book for free online
            -best source to download gc agarwal physics book for free online
            -best site to read gc agarwal physics book online for free
            -best way to study from gc agarwal physics book online for free
            -best method to learn from gc agarwal physics book online for free
            -best tips to prepare from gc agarwal physics book online for free
            -best tricks to crack from gc agarwal physics book online for free
            -best hacks to master from gc agarwal physics book online for free

            -

            The other option is to use online platforms that provide access to GC Agarwal Physics Book eBooks. These platforms are more reliable and secure than the websites that offer free downloads. They also have features like bookmarking, highlighting, annotating, etc. that can enhance your reading experience. Some of the online platforms that provide access to GC Agarwal Physics Book eBooks are:

            - - - - - - - - - - - - - - - - - - - - - - - - - - -
            NameLinkPrice
            Vedantuhttps://www.vedantu.com/books/gc-agarwal-physics-bookFree
            EduRevhttps://edurev.in/studytube/GC-Agarwal-Physics-Book--Class-11--Class-12--IIT-JE/4f6c0f7b-4d9d-4b0e-bf8e-5b0f7a8f9c5a_tFree
            Scribdhttps://www.scribd.com/search?query=gc+agarwal+physics+book$9.99 per month (after 30-day free trial)
            Amazon Kindlehttps://www.amazon.in/s?k=gc+agarwal+physics+book&rh=n%3A1634753031&ref=nb_sb_noss$2.99-$9.99 per book (depending on volume)
            -

            Benefits of GC Agarwal Physics Book

            -

            Comprehensive coverage of topics

            -

            One of the main benefits of GC Agarwal Physics Book is that it covers all the topics that are relevant for competitive exams like JEE Main, JEE Advanced, NEET, etc. The books follow the latest syllabus and exam pattern prescribed by the National Testing Agency (NTA) and other exam conducting bodies. The books also cover some additional topics that are not included in the syllabus but are important for enhancing your knowledge and skills.

            -

            Clear and concise explanations

            -

            Another benefit of GC Agarwal Physics Book is that it provides clear and concise explanations of the concepts and theories of physics. The books use simple and lucid language that can be easily understood by students of any level. The books also use diagrams, graphs, tables, etc. to illustrate the concepts and make them more visual and appealing. The books avoid unnecessary details and jargon that can confuse or bore the readers.

            -

            Solved examples and exercises

            -

            A third benefit of GC Agarwal Physics Book is that it includes a large number of solved examples and exercises that can help you practice and master the topics. The books provide step-by-step solutions to the examples that can help you learn the methods and techniques of solving problems. The books also provide exercises at the end of each chapter that contain various types of questions such as objective type, subjective type, multiple choice type, assertion-reason type, etc. The exercises are graded according to their level of difficulty and complexity.

            -

            Tips and tricks

            Tips and tricks for solving problems

            -

            A fourth benefit of GC Agarwal Physics Book is that it gives tips and tricks for solving problems quickly and accurately. The books provide shortcuts, formulas, rules, mnemonics, etc. that can help you save time and effort while solving problems. The books also provide hints and clues to some difficult or tricky questions that can help you overcome your doubts or difficulties.

            -

            Drawbacks of GC Agarwal Physics Book

            -

            Outdated syllabus and content

            -

            One of the main drawbacks of GC Agarwal Physics Book is that it has an outdated syllabus and content that does not match with the current trends and requirements of competitive exams like JEE Main, JEE Advanced, NEET, etc. The books were written several years ago when the syllabus and exam pattern were different from what they are now. The books do not include some new topics or concepts that have been introduced or modified in recent years.

            -

            Lack of online support and resources

            -

            Another drawback of GC Agarwal Physics Book is that it lacks online support and resources that can enhance your learning experience. The books do not have any official website or app where you can access additional features or services such as video lectures, interactive quizzes, doubt clearing sessions, mock tests, etc. The books also do not provide any online solutions or answer keys to the exercises or questions. You have to rely on other sources or platforms to get these online support and resources.

            -

            Errors and mistakes in some solutions

            -

            A third drawback of GC Agarwal Physics Book is that it contains some errors and mistakes in some solutions or answers. The books have not been revised or updated for a long time, so some of the solutions or answers may be incorrect or incomplete. This can create confusion or frustration among the students who use these books. You have to be careful while using these books and cross-check the solutions or answers with other sources or platforms.

            -

            Alternatives to GC Agarwal Physics Book

            -

            NCERT Physics Books

            -

            One of the best alternatives to GC Agarwal Physics Book is NCERT Physics Books. These are the official textbooks prescribed by the CBSE board for class 11 and 12 physics. These books are also recommended by the NTA for competitive exams like JEE Main, JEE Advanced, NEET, etc. These books have several advantages over GC Agarwal Physics Book such as:

            -
              -
            • They are updated and revised regularly according to the latest syllabus and exam pattern.
            • -
            • They are available online for free on the official website of NCERT.
            • -
            • They provide online solutions and answer keys to the exercises and questions on various platforms such as Vedantu, EduRev, etc.
            • -
            • They are simple and easy to understand, with minimal errors and mistakes.
            • -
            -

            HC Verma Concepts of Physics

            -

            Another alternative to GC Agarwal Physics Book is HC Verma Concepts of Physics. This is a series of two physics books written by Dr. H.C. Verma, a famous physicist and author. These books cover the topics of mechanics, waves, thermodynamics, electricity, magnetism, optics, modern physics, etc. These books are also widely used by students for competitive exams like JEE Main, JEE Advanced, NEET, etc. These books have several advantages over GC Agarwal Physics Book such as:

            -
              -
            • They are based on the concept-building approach rather than the rote-learning approach.
            • -
            • They provide a variety of questions such as objective type, subjective type, multiple choice type, fill in the blanks type, true-false type, matching type, etc. that test your understanding and application skills.
            • -
            • They provide detailed solutions and explanations to all the questions in a separate book called Solutions to Concepts of Physics by HC Verma.
            • -
            • They are engaging and interesting, with real-life examples and applications.
            • -
            -

            DC Pandey Understanding Physics

            -

            A third alternative to GC Agarwal Physics Book is DC Pandey Understanding Physics. This is a series of five physics books written by D.C. Pandey, a renowned physics teacher and author. These books cover the topics of mechanics, electricity and magnetism, waves and thermodynamics, optics and modern physics, etc. These books are also popular among students for competitive exams like JEE Main, JEE Advanced, NEET, etc. These books have several advantages over GC Agarwal Physics Book such as:

            -
              -
            • They are aligned with the latest syllabus and exam pattern of competitive exams.
            • -
            • They provide online access to video lectures, interactive quizzes, doubt clearing sessions, mock tests, etc. on various platforms such as Arihant Online Test Series (AOTS), Arihant Live (ALIVE), etc.
            • -
            • They provide a large number of questions such as objective type, subjective type, multiple choice type, assertion-reason type, comprehension type, match the column type, integer answer type, etc. that cover all the aspects and dimensions of physics.
            • -
            • They provide tips and tricks
            • They provide tips and tricks for solving problems efficiently and effectively.
            • -
            -

            Conclusion

            -

            In conclusion, GC Agarwal Physics Book is a series of physics books that can help you prepare for competitive exams like JEE Main, JEE Advanced, NEET, etc. The books have some benefits such as comprehensive coverage of topics, clear and concise explanations, solved examples and exercises, tips and tricks for solving problems, etc. However, the books also have some drawbacks such as outdated syllabus and content, lack of online support and resources, errors and mistakes in some solutions, etc. Therefore, you should consider some alternatives to GC Agarwal Physics Book such as NCERT Physics Books, HC Verma Concepts of Physics, DC Pandey Understanding Physics, etc. These alternatives can provide you with more updated and relevant content, online support and resources, variety and quality of questions, etc. You should choose the best physics book for yourself according to your needs and preferences.

            -

            FAQs

            -

            Here are some frequently asked questions about GC Agarwal Physics Book and its alternatives.

            -
              -
            1. Is GC Agarwal Physics Book enough for JEE Main and JEE Advanced?
            2. -

              No, GC Agarwal Physics Book is not enough for JEE Main and JEE Advanced. The book has an outdated syllabus and content that does not match with the current trends and requirements of these exams. You need to supplement GC Agarwal Physics Book with other books or sources that cover the new topics or concepts that are asked in these exams.

              -
            3. Which is better: HC Verma Concepts of Physics or DC Pandey Understanding Physics?
            4. -

              Both HC Verma Concepts of Physics and DC Pandey Understanding Physics are good physics books for competitive exams like JEE Main and JEE Advanced. However, they have different strengths and weaknesses. HC Verma Concepts of Physics is better for building your concepts and understanding of physics. DC Pandey Understanding Physics is better for practicing your problem-solving skills and applying your concepts to different types of questions. You should use both books to get the best results.

              -
            5. How can I get the solutions to GC Agarwal Physics Book?
            6. -

              You can get the solutions to GC Agarwal Physics Book from various sources or platforms such as Vedantu, EduRev, etc. These sources or platforms provide online solutions or answer keys to the exercises or questions in GC Agarwal Physics Book. However, you should be careful while using these sources or platforms as they may contain some errors or mistakes in some solutions or answers. You should cross-check the solutions or answers with other sources or platforms.

              -
            7. Which is the best online platform to access GC Agarwal Physics Book eBooks?
            8. -

              The best online platform to access GC Agarwal Physics Book eBooks depends on your preferences and budget. Some of the online platforms that provide access to GC Agarwal Physics Book eBooks are Vedantu, EduRev, Scribd, Amazon Kindle, etc. These platforms have different features and prices that can suit your needs. You should compare these platforms and choose the one that offers you the best value for money.

              -
            9. Which is the best alternative to GC Agarwal Physics Book?
            10. -

              The best alternative to GC Agarwal Physics Book depends on your level and goal. Some of the alternatives to GC Agarwal Physics Book are NCERT Physics Books, HC Verma Concepts of Physics, DC Pandey Understanding Physics, etc. These alternatives have different advantages and disadvantages that can suit your needs. You should evaluate these alternatives and choose the one that matches your level and goal.

              -
            -

            0a6ba089eb
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get 3ds Max 2022 xforce Free A Step-by-Step Guide.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Get 3ds Max 2022 xforce Free A Step-by-Step Guide.md deleted file mode 100644 index 0e7157b64aad5f50e67996844bf2e3e6d849a32d..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get 3ds Max 2022 xforce Free A Step-by-Step Guide.md +++ /dev/null @@ -1,34 +0,0 @@ -
            -

            How to Download and Install 3ds Max 2022 xforce Free

            -

            If you are looking for a powerful and versatile 3D modeling and animation software, you might want to check out 3ds Max 2022. This software is widely used by professionals and hobbyists alike for creating stunning graphics, games, movies, and more. But how can you get 3ds Max 2022 for free? In this article, we will show you how to download and install 3ds Max 2022 xforce free, a cracked version of the software that bypasses the activation process.

            -

            3ds max 2022 crack xforce free download


            Download Filehttps://urlcod.com/2uKajL



            -

            What is 3ds Max 2022 xforce free?

            -

            3ds Max 2022 xforce free is a modified version of the original 3ds Max 2022 software that uses a keygen or a patch to generate a valid serial number and product key for activation. By using this method, you can avoid paying for the subscription fee or the license cost of the software. However, you should be aware that using 3ds Max 2022 xforce free is illegal and risky. You might face legal consequences or damage your computer with malware or viruses. Therefore, we do not recommend or endorse using 3ds Max 2022 xforce free.

            -

            How to download 3ds Max 2022 xforce free?

            -

            If you still want to try 3ds Max 2022 xforce free at your own risk, here are the steps you need to follow:

            -
              -
            1. Download the original 3ds Max 2022 software from the official website or a trusted source. You can choose the trial version or the full version.
            2. -
            3. Download the xforce keygen or patch from a reliable source. Make sure you scan it with an antivirus program before opening it.
            4. -
            5. Disconnect your internet connection and disable your antivirus software temporarily.
            6. -
            7. Run the xforce keygen or patch as administrator. Select 3ds Max 2022 from the list of products and click on Generate.
            8. -
            9. Copy the generated serial number and product key and paste them in the corresponding fields when prompted by the installation wizard.
            10. -
            11. Complete the installation process and launch the software.
            12. -
            13. Enjoy using 3ds Max 2022 xforce free!
            14. -
            -

            Conclusion

            -

            3ds Max 2022 is a great software for creating amazing 3D models and animations. However, it is not free and requires a valid license or subscription to use. If you want to use it for free, you might be tempted to use 3ds Max 2022 xforce free, a cracked version of the software that uses a keygen or a patch to activate it. However, this is illegal and risky, and we do not recommend it. Instead, you should consider using alternative software that are free and legal, such as Blender, SketchUp, or Maya LT.

            -

            - -

            How to use 3ds Max 2022 xforce free?

            -

            Once you have installed 3ds Max 2022 xforce free, you can start using it for your 3D projects. Here are some of the features and benefits of using 3ds Max 2022:

            -
              -
            • 3ds Max 2022 has a new and improved user interface that is more intuitive and customizable. You can easily access the tools and commands you need, and adjust the layout and colors to suit your preferences.
            • -
            • 3ds Max 2022 has a new scene explorer that lets you manage and organize your 3D assets more efficiently. You can filter, sort, search, and edit your objects, layers, groups, and modifiers in a hierarchical view.
            • -
            • 3ds Max 2022 has a new smart extrude feature that lets you create complex shapes and surfaces with ease. You can extrude multiple polygons at once, along any direction, with automatic collision detection and correction.
            • -
            • 3ds Max 2022 has a new chamfer modifier that lets you create smooth and realistic edges and corners. You can control the amount, shape, depth, and width of the chamfers, and apply them to multiple edges or loops at once.
            • -
            • 3ds Max 2022 has a new bake to texture feature that lets you generate high-quality textures from your 3D models. You can bake lighting, shadows, ambient occlusion, normal maps, and more, with various options and presets.
            • -
            • 3ds Max 2022 has a new Arnold renderer that lets you create stunning photorealistic images and animations. You can use the built-in materials, lights, cameras, and effects, or customize them to your needs. You can also use the GPU mode for faster rendering.
            • -
            -

            These are just some of the features and benefits of using 3ds Max 2022 xforce free. However, you should also be aware of the drawbacks and risks of using a cracked software. You might encounter errors, bugs, crashes, or compatibility issues. You might also expose your computer to malware or viruses that could harm your data or system. Moreover, you might face legal consequences or penalties for violating the intellectual property rights of the software developer. Therefore, we advise you to use 3ds Max 2022 xforce free at your own discretion and responsibility.

            ddb901b051
            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Angry Birds Classic Hack Download the Best Mod for Your Android Device.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Angry Birds Classic Hack Download the Best Mod for Your Android Device.md deleted file mode 100644 index 2a5b64f68c7bf4ae806eb4fe1d3925c2c5c722bd..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Angry Birds Classic Hack Download the Best Mod for Your Android Device.md +++ /dev/null @@ -1,115 +0,0 @@ - -

            Angry Birds Classic Hack Download: A Guide for Gamers

            -

            Angry Birds Classic is one of the most popular and addictive mobile games ever created. It was released in 2009 by Rovio Entertainment and has since spawned several sequels, spin-offs, movies, and merchandise. The game involves using a slingshot to launch different types of birds at structures made of various materials, such as wood, glass, stone, and metal, where green pigs are hiding. The goal is to destroy all the pigs and collect stars, eggs, and other items along the way.

            -

            The game features challenging physics-based gameplay and hundreds of levels across different episodes and themes. Each bird has its own unique ability that can be activated by tapping the screen while it is in flight. For example, the yellow bird can speed up, the black bird can explode, and the white bird can drop egg bombs. The game also offers power-ups, such as extra birds, mighty eagle, sling scope, king sling, and super seeds, that can enhance the birds' destructive power.

            -

            angry birds classic hack download


            Downloadhttps://bltlly.com/2uOmkv



            -

            However, not everyone can master the game easily. Some levels are very difficult to complete with three stars, some pigs are very hard to reach or kill, and some power-ups are very expensive to buy or use. That's why some players resort to using hacks for Angry Birds Classic. A hack is a modification or alteration of the game's code or data that gives the player an unfair advantage or changes the game's behavior in some way. For example, a hack can give unlimited birds, coins, power-ups, or stars; unlock all levels and episodes; remove ads; or disable obstacles.

            -

            Using hacks can make the game more fun and easy for some players who want to enjoy the game without spending too much time or money on it. However, using hacks can also have some drawbacks and risks, such as ruining the game's balance and challenge; violating the game's terms of service and privacy policy; exposing the device to malware or viruses; or getting banned or detected by Rovio's anti-cheat system.

            -

            If you still want to try using hacks for Angry Birds Classic, this article will guide you on how to download them for different devices: Android, iOS, and PC. Please note that this article is for educational purposes only and does not endorse or encourage hacking in any way. Use hacks at your own risk and discretion.

            -

            How to Download Angry Birds Classic Hack for Android Devices

            -

            If you have an Android device, such as a smartphone or tablet, you can follow these steps to download a hack for Angry Birds Classic:

            -
              -
            1. Go to on your device's browser. This is a website that offers various mods for Android games, including Angry Birds Classic.
            2. -
            3. Scroll down until you see Angry Birds Classic (MOD, Unlimited Money) 8.0.3. This is a modded version of the game that gives you unlimited coins and power-ups.
            4. -
            5. Tap on Download (99.1Mb) to start downloading the modded APK file.
            6. -
            7. Once the download is complete, go to your device's settings and enable installation from unknown sources. This will allow you to install apps that are not from the Google Play Store.
            8. -
            9. Go to your device's file Birds Classic.
            10. -
            11. Scroll down until you see Angry Birds Classic v8.0.3 +4 Trainer. This is a trainer that gives you four options to modify the game, such as unlimited birds, coins, power-ups, and stars.
            12. -
            13. Click on Download to start downloading the trainer file.
            14. -
            15. Once the download is complete, extract the trainer file to a folder of your choice.
            16. -
            17. Launch the trainer file and click on Run as administrator. You may need to disable your antivirus or firewall to run the trainer.
            18. -
            19. Launch the game from your PC or from the trainer. You will see a small window with the trainer options and hotkeys.
            20. -
            21. Press the hotkeys to activate or deactivate the trainer options as you play the game.
            22. -
            23. Enjoy playing Angry Birds Classic with the trainer!
            24. -
            -

            Some of the pros of using this hack are:

            -
              -
            • You can use any bird, coin, power-up, or star you want without worrying about running out of them or spending real money.
            • -
            • You can easily complete any level with three stars and unlock all episodes and themes.
            • -
            • You can have more fun and variety in the game with different trainer options and hotkeys.
            • -
            -

            Some of the cons of using this hack are:

            -
              -
            • You may lose the original challenge and satisfaction of playing the game as intended.
            • -
            • You may encounter some bugs or glitches in the game due to the trainer code or data.
            • -
            • You may risk getting banned or detected by Rovio if they find out that you are using a hack.
            • -
            -

            Conclusion

            -

            In this article, we have shown you how to download Angry Birds Classic hack for different devices: Android, iOS, and PC. We have also discussed some of the pros and cons of using hacks for this game. Using hacks can make the game more fun and easy for some players who want to enjoy the game without spending too much time or money on it. However, using hacks can also have some drawbacks and risks, such as ruining the game's balance and challenge; violating the game's terms of service and privacy policy; exposing the device to malware or viruses; or getting banned or detected by Rovio's anti-cheat system.

            -

            If you decide to use hacks for Angry Birds Classic, here are some tips and tricks for playing the game with them:

            -

            How to download angry birds classic hack for free
            -Angry birds classic hack mod apk download
            -Angry birds classic hack unlimited coins and gems
            -Download angry birds classic hack for android
            -Angry birds classic hack cheats tool online
            -Angry birds classic hack no survey no password
            -Angry birds classic hack version 5.0.1 download
            -Angry birds classic hack game jolt[^1^]
            -Angry birds classic hack ios download
            -Angry birds classic hack pc download
            -Angry birds classic hack without root or jailbreak
            -Angry birds classic hack 2023 latest update
            -Angry birds classic hack tips and tricks
            -Angry birds classic hack gameplay video
            -Angry birds classic hack review and rating
            -Angry birds classic hack safe and secure download
            -Angry birds classic hack features and benefits
            -Angry birds classic hack support and feedback
            -Angry birds classic hack compatible with all devices
            -Angry birds classic hack easy and fast download
            -Download angry birds classic hack from official website
            -Angry birds classic hack direct download link
            -Angry birds classic hack mirror download link
            -Angry birds classic hack alternative download link
            -Angry birds classic hack torrent download link
            -Download angry birds classic hack from google play store
            -Download angry birds classic hack from app store
            -Download angry birds classic hack from amazon appstore
            -Download angry birds classic hack from game jolt[^1^]
            -Download angry birds classic hack from mediafire
            -Download angry birds classic hack from mega.nz
            -Download angry birds classic hack from zippyshare
            -Download angry birds classic hack from dropbox
            -Download angry birds classic hack from 4shared
            -Download angry birds classic hack from file-upload.com
            -Download angry birds classic hack from apk pure
            -Download angry birds classic hack from apk mirror
            -Download angry birds classic hack from apk monk
            -Download angry birds classic hack from apk home
            -Download angry birds classic hack from apk done

            -
              -
            • Use hacks sparingly and moderately. Don't use them all the time or on every level. Try to play some levels without hacks to test your skills and enjoy the game's original design.
            • -
            • Use hacks wisely and strategically. Don't use them randomly or recklessly. Try to use them only when you need them or when they can help you achieve your goals.
            • -
            • Use hacks responsibly and ethically. Don't use them to cheat or harm other players or Rovio. Try to respect the game's rules and community.
            • -
            -

            We hope that this article has been helpful and informative for you. If you have any questions or comments about Angry Birds Classic hack download, feel free to leave them below. Happy gaming!

            -

            FAQs

            -

            What are some of the best hacks for Angry Birds Classic?

            -

            Some of the best hacks for Angry Birds Classic are:

            -
              -
            • Unlimited birds, coins, power-ups, and stars. These hacks give you unlimited resources to use in the game, making it easier and more fun to play.
            • -
            • Unlock all levels and episodes. These hacks let you access all the content in the game, giving you more variety and options to play.
            • -
            • Mod menu. This hack gives you a menu with various options to customize the game, such as changing the bird's size, speed, gravity, angle, etc., giving you more control and creativity in the game.
            • -
            -

            Is it legal to use hacks for Angry Birds Classic?

            -

            No, it is not legal to use hacks for Angry Birds Classic. Using hacks violates the game's terms of service and privacy policy, which prohibit modifying, altering, or interfering with the game's code or data. Using hacks can also be considered as cheating, which is illegal and unethical in most games and competitions. Using hacks can result in legal actions or penalties from Rovio, such as banning, suspending, or deleting your account or progress.

            -

            Will using hacks affect my game progress or achievements?

            -

            Yes, using hacks will affect your game progress or achievements. Using hacks can make your game progress faster or easier, but it can also make it less meaningful or rewarding. Using hacks can also affect your game achievements, such as stars, eggs, trophies, or leaderboards. Using hacks can make your achievements easier to obtain, but it can also make them less valuable or credible. Using hacks can also affect your game sync, backup, or restore, as the game may not recognize or accept your hacked data.

            -

            How can I avoid getting banned or detected by Rovio for using hacks?

            -

            There is no sure way to avoid getting banned or detected by Rovio for using hacks. Rovio has a sophisticated anti-cheat system that can monitor and analyze your game activity and data. If Rovio finds any evidence or suspicion of hacking, they can ban or detect your account or device at any time. However, some possible ways to reduce the risk of getting banned or detected are:

            -
              -
            • Use hacks only from trusted and reputable sources. Avoid downloading hacks from unknown or shady websites that may contain malware or viruses.
            • -
            • Use hacks only on offline mode. Avoid using hacks on online mode or connecting to the internet while playing the game.
            • -
            • Use hacks only occasionally and moderately. Avoid using hacks all the time or on every level.
            • -
            • Use hacks only for personal use. Avoid sharing or bragging about your hacks with other players or on social media.
            • -
            -

            Where can I find more information and resources about Angry Birds Classic hacks?

            -

            If you want to find more information and resources about Angry Birds Classic hacks, you can visit some of these websites:

            -
              -
            • . This is a website that offers various mods for Android games, including Angry Birds Classic.
            • -
            • . This is a website that offers various tweaks and hacks for iOS games and apps, including Angry Birds Classic.
            • -
            • . This is a website that offers various cheats and trainers for PC games, including Angry Birds Classic.
            • -
            • . This is a website that offers various tips and guides for Angry Birds Classic, including how to use power-ups, how to get three stars, how to unlock episodes, etc.
            • -

            401be4b1e0
            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Driver San Francisco Ubisoft Game Launcher TOP Download.md b/spaces/tioseFevbu/cartoon-converter/scripts/Driver San Francisco Ubisoft Game Launcher TOP Download.md deleted file mode 100644 index 4bc925343a192c95283458b2b1b374e2d2651dea..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Driver San Francisco Ubisoft Game Launcher TOP Download.md +++ /dev/null @@ -1,20 +0,0 @@ -
            -

            How to Download and Play Driver San Francisco on PC with Ubisoft Game Launcher

            -

            Driver San Francisco is a 2011 action-adventure racing game developed by Ubisoft Reflections and published by Ubisoft. The game features an innovative gameplay feature that allows players to seamlessly shift between over 130 licensed cars and explore a realistic recreation of San Francisco. The game also supports online multiplayer modes and a story mode that follows the protagonist John Tanner, a detective who can use his ability to shift into other drivers' bodies to stop a crime lord.

            -

            Driver San Francisco Ubisoft Game Launcher Download


            Download File » https://urlcod.com/2uHx4t



            -

            If you want to download and play Driver San Francisco on your PC, you will need to use the Ubisoft Game Launcher, a free service that connects you with other Ubisoft players and games across all platforms. Here are the steps to follow:

            -
              -
            1. Create a Ubisoft account or log in with your existing one.
            2. -
            3. Download the Ubisoft Game Launcher from https://ubisoftconnect.com/ and install it on your PC.
            4. -
            5. Launch the Ubisoft Game Launcher and go to the Store tab. Search for Driver San Francisco and purchase it. You can also redeem a code if you have one.
            6. -
            7. Go to the Library tab and find Driver San Francisco in your games list. Click on the Download button and wait for the game to be installed on your PC.
            8. -
            9. Once the installation is complete, click on the Play button and enjoy Driver San Francisco on your PC.
            10. -
            -

            Note: You may need to adjust some compatibility settings for the game to run properly on your PC. For example, you may need to disable fullscreen optimization or run the game as an administrator. You can find more details on how to fix common issues on https://kosgames.com/driver-san-francisco-how-to-fix-game-not-launch-in-2021-play-online-1074/.

            - -

            Driver San Francisco is a game that offers a unique and thrilling experience for racing fans and action lovers. The game has received positive reviews from critics and players alike, praising its innovative gameplay, diverse car selection, realistic graphics, and engaging story. The game also features a variety of challenges and activities to keep you entertained, such as races, stunts, dares, and missions. You can also customize your cars and unlock new ones as you progress through the game.

            -

            If you want to play Driver San Francisco online with other players, you can join the Ubisoft Connect network and access various multiplayer modes. You can compete with up to seven other players in different modes, such as tag, trailblazer, takedown, and capture the flag. You can also team up with a friend in split-screen or online co-op mode and complete missions together. You can also use your shifting ability to help or hinder other players in online mode.

            -

            -

            Driver San Francisco is a game that will keep you on the edge of your seat and make you feel like a real driver. If you are looking for a fun and exciting game to play on your PC, you should definitely give Driver San Francisco a try. You can download it from the Ubisoft Game Launcher and start your adventure in the city by the bay.

            cec2833e83
            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Flexisign Pro 10 Keygen 35.md b/spaces/tioseFevbu/cartoon-converter/scripts/Flexisign Pro 10 Keygen 35.md deleted file mode 100644 index a12b1732946057346eaa7545e1f39ff29f634757..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Flexisign Pro 10 Keygen 35.md +++ /dev/null @@ -1,66 +0,0 @@ - -

            Flexisign Pro 10 Keygen 35: How to Download and Activate the Best Graphic Design Software for Signs

            -

            If you are looking for a powerful graphic design software that can help you create professional-looking logos and vector graphics, design direct-to-vinyl signs with multiple colors, text, and vector graphics, and offer a complete set of design, cutting, RIPing, and printing tools for maximum productivity, then you might want to check out Flexisign Pro 10.

            -

            Flexisign Pro 10 is one of the best and the most useful Photoshop alternatives, which is a reliable and free image editing and vector graphic software. It combines the power of genuine Adobe® PostScript® 3 RIP engine, ICC profile support, and built-in direct drivers offering a complete set of design, cutting, RIPing, and printing tools for maximum productivity. It also includes vector graphics scheme enabling you to generate very advanced logos together with powerful web design. It offers full graphic design, text serialization, and color tracing features. It allows you to easily modify and design the vectors using a wide range of available resources to generate excellent vector graphics and logos.

            -

            flexisign pro 10 keygen 35


            DOWNLOAD ->>->>->> https://urlcod.com/2uHw1L



            -

            However, Flexisign Pro 10 is not a free software. You need to purchase a license to use it legally. But what if you don't have enough money to buy it? Or what if you just want to try it out before buying it? Is there a way to get Flexisign Pro 10 for free?

            -

            The answer is yes. There is a way to get Flexisign Pro 10 for free by using a keygen. A keygen is a software that can generate a serial number and an activation code for a software that you want to use. By using a keygen, you can bypass the license verification process and activate the software without paying anything.

            -

            But is it legal to use a keygen? And where can you find a reliable and safe keygen for Flexisign Pro 10? How can you download and activate Flexisign Pro 10 with a keygen? And how can you use Flexisign Pro 10 for graphic design? In this article, we will answer all these questions and more. We will show you how to download and activate Flexisign Pro 10 with keygen 35, which is one of the most popular and working keygens for Flexisign Pro 10. We will also show you how to use Flexisign Pro 10 for graphic design and create stunning logos and vector graphics with ease.

            -

            -

            What is Flexisign Pro 10?

            -

            Flexisign Pro 10 is a graphic design software that is specially designed for signs, banners, decals, stickers, vehicle wraps, and other types of graphics. It is developed by SA International (SAi), which is a leading provider of software solutions for the signmaking, digital printing, textile, and CNC machining industries. Flexisign Pro 10 is part of the SAi Flexi family of products, which also includes FlexiSIGN, FlexiPRINT, FlexiDESIGNER, and FlexiEXPERT.

            -

            Features and benefits of Flexisign Pro 10

            -

            Flexisign Pro 10 has many features and benefits that make it one of the best graphic design software for signs. Some of them are:

            -
              -
            • It has a user-friendly interface that allows you to easily access all the tools and features you need.
            • -
            • It supports a wide range of file formats, such as EPS, PDF, AI, PSD, JPG, PNG, TIFF, BMP, SVG, DXF, PLT, and more.
            • -
            • It has a powerful vector graphics engine that lets you create and edit vector graphics with precision and quality.
            • -
            • It has a comprehensive text tool that lets you add and edit text with various fonts, styles, effects, and alignment options.
            • -
            • It has a color management system that ensures accurate color reproduction across different devices and media.
            • -
            • It has a genuine Adobe® PostScript® 3 RIP engine that enables fast and reliable printing of complex graphics.
            • -
            • It has built-in direct drivers that support over 1,000 cutters, printers, hybrids, flatbeds, and print-and-cut devices.
            • -
            • It has a cutting tool that lets you cut your designs directly from the software or export them to a cutter.
            • -
            • It has a contour cutting tool that lets you create contour cut lines around your graphics for easy cutting.
            • -
            • It has a print-and-cut tool that lets you print your designs on a printer and then cut them on a cutter with automatic registration marks.
            • -
            • It has a nesting tool that lets you optimize the use of media by arranging multiple graphics on one sheet.
            • -
            • It has a tiling tool that lets you split large graphics into smaller tiles for printing on multiple sheets.
            • -
            • It has an ICC profile support that lets you use custom profiles for different media types and print modes.
            • -
            • It has an advanced color tracing tool that lets you convert bitmap images into vector graphics with accurate colors.
            • -
            • It has an image editing tool that lets you enhance your images with filters, adjustments, effects, and masks.
            • -
            -

            System requirements for Flexisign Pro 10

            -

            To run Flexisign Pro 10 smoothly on your computer, you need to meet the following system requirements:

            - - - - - - - - - -
            Operating systemWindows 7/8/10 (32-bit or 64-bit)
            CPUPentium IV or higher
            RAM1 GB or higher
            HDD4 GB or higher
            Graphics cardDedicated OpenGL 2.0 compatible with at least 256 MB memory
            Monitor resolution1024 x 768 or higher
            DVD driveRequired for installation
            Internet connectionRequired for activation and updates
            - -

            A keygen is a software that can generate a serial number and an activation code for another software that you want to use. A serial number is a unique identifier that is required to install and register a software. An activation code is a code that is required to activate a software and unlock its full features. A keygen can create these codes by using a specific algorithm that matches the one used by the original software developer.

            -

            You need a keygen if you want to use a software without paying for it or without having a valid license. By using a keygen, you can bypass the license verification process and activate the software for free. This way, you can enjoy the full functionality of the software without any limitations or restrictions.

            -

            The difference between a keygen and a crack

            -

            A keygen is not the same as a crack. A crack is a software that modifies or patches another software to remove or disable its protection mechanisms, such as license verification, trial expiration, or online activation. A crack does not generate any codes, but rather alters the original software files to make it think that it is already activated or registered.

            -

            The main difference between a keygen and a crack is that a keygen does not modify or damage the original software files, but rather creates new codes that can be used to activate the software. A keygen is usually safer and easier to use than a crack, as it does not require any changes to the system files or registry entries. However, both keygens and cracks are illegal and unethical, as they violate the intellectual property rights of the software developers and distributors.

            -

            The advantages and disadvantages of using a keygen

            -

            Using a keygen has some advantages and disadvantages that you should be aware of before deciding to use one. Some of the advantages are:

            • You can use the software for free without paying anything.
            • You can access all the features and functions of the software without any limitations or restrictions.
            • You can use the software offline without needing an internet connection for activation.
            • You can use the software on multiple devices without needing multiple licenses.

            Some of the disadvantages are:

            • You are breaking the law and risking legal consequences for piracy and theft.
            • You are violating the terms and conditions of the software license agreement and losing your right to technical support and updates.
            • You are exposing your computer and data to potential viruses and malware that may be hidden in the keygen or downloaded from untrusted sources.
            • You are compromising the quality and performance of the software, as it may not work properly or may crash frequently due to compatibility issues or bugs.
            • You are disrespecting the hard work and creativity of the software developers and distributors who deserve to be paid for their products and services.

            b2dd77e56b
            -
            -
            \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/eucjpprober.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/eucjpprober.py deleted file mode 100644 index abf2e66e283eb45c404e2d566c3933ae369324e8..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/eucjpprober.py +++ /dev/null @@ -1,95 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .chardistribution import EUCJPDistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .enums import MachineState, ProbingState -from .jpcntx import EUCJPContextAnalysis -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import EUCJP_SM_MODEL - - -class EUCJPProber(MultiByteCharSetProber): - def __init__(self): - super().__init__() - self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL) - self.distribution_analyzer = EUCJPDistributionAnalysis() - self.context_analyzer = EUCJPContextAnalysis() - self.reset() - - def reset(self): - super().reset() - self.context_analyzer.reset() - - @property - def charset_name(self): - return "EUC-JP" - - @property - def language(self): - return "Japanese" - - def feed(self, byte_str): - for i, byte in enumerate(byte_str): - # PY3K: byte_str is a byte array, so byte is an int, not a byte - coding_state = self.coding_sm.next_state(byte) - if coding_state == MachineState.ERROR: - self.logger.debug( - "%s %s prober hit error at byte %s", - self.charset_name, - self.language, - i, - ) - self._state = ProbingState.NOT_ME - break - if coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - break - if coding_state == MachineState.START: - char_len = self.coding_sm.get_current_charlen() - if i == 0: - self._last_char[1] = byte - self.context_analyzer.feed(self._last_char, char_len) - self.distribution_analyzer.feed(self._last_char, char_len) - else: - self.context_analyzer.feed(byte_str[i - 1 : i + 1], char_len) - self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len) - - self._last_char[0] = byte_str[-1] - - if self.state == ProbingState.DETECTING: - if self.context_analyzer.got_enough_data() and ( - self.get_confidence() > self.SHORTCUT_THRESHOLD - ): - self._state = ProbingState.FOUND_IT - - return self.state - - 
def get_confidence(self): - context_conf = self.context_analyzer.get_confidence() - distrib_conf = self.distribution_analyzer.get_confidence() - return max(context_conf, distrib_conf) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/__init__.py deleted file mode 100644 index b3ac0146cb3f4cb1894f55fc09775875bc4e1177..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -"""distutils - -The main package for the Python Module Distribution Utilities. Normally -used from a setup script as - - from distutils.core import setup - - setup (...) -""" - -import sys -import importlib - -__version__ = sys.version[: sys.version.index(' ')] - - -try: - # Allow Debian and pkgsrc (only) to customize system - # behavior. Ref pypa/distutils#2 and pypa/distutils#16. - # This hook is deprecated and no other environments - # should use it. - importlib.import_module('_distutils_system_mod') -except ImportError: - pass diff --git a/spaces/tomar79/webcam/README.md b/spaces/tomar79/webcam/README.md deleted file mode 100644 index bae6348b2bc47317380411833a6aeed4e49334de..0000000000000000000000000000000000000000 --- a/spaces/tomar79/webcam/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Webcam -emoji: 🏢 -colorFrom: gray -colorTo: blue -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/datasets/coco.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/datasets/coco.py deleted file mode 100644 index 030238d1a6f3b8b78f1dd21474ed5c0eaa7003d7..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/datasets/coco.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
-import torch -import torchvision - -from maskrcnn_benchmark.structures.bounding_box import BoxList -from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask - - -class COCODataset(torchvision.datasets.coco.CocoDetection): - def __init__( - self, ann_file, root, remove_images_without_annotations, transforms=None - ): - super(COCODataset, self).__init__(root, ann_file) - - # sort indices for reproducible results - self.ids = sorted(self.ids) - - # filter images without detection annotations - if remove_images_without_annotations: - self.ids = [ - img_id - for img_id in self.ids - if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0 - ] - - self.json_category_id_to_contiguous_id = { - v: i + 1 for i, v in enumerate(self.coco.getCatIds()) - } - self.contiguous_category_id_to_json_id = { - v: k for k, v in self.json_category_id_to_contiguous_id.items() - } - self.id_to_img_map = {k: v for k, v in enumerate(self.ids)} - self.transforms = transforms - - def __getitem__(self, idx): - img, anno = super(COCODataset, self).__getitem__(idx) - - # filter crowd annotations - # TODO might be better to add an extra field - anno = [obj for obj in anno if obj["iscrowd"] == 0] - - boxes = [obj["bbox"] for obj in anno] - boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes - target = BoxList(boxes, img.size, mode="xywh",use_char_ann=False).convert("xyxy") - - classes = [obj["category_id"] for obj in anno] - classes = [self.json_category_id_to_contiguous_id[c] for c in classes] - classes = torch.tensor(classes) - target.add_field("labels", classes) - - masks = [obj["segmentation"] for obj in anno] - masks = SegmentationMask(masks, img.size) - target.add_field("masks", masks) - - target = target.clip_to_image(remove_empty=True) - - if self.transforms is not None: - img, target = self.transforms(img, target) - - return img, target, idx - - def get_img_info(self, index): - img_id = self.id_to_img_map[index] - img_data = self.coco.imgs[img_id] - return img_data diff --git a/spaces/tomzhang1019/ChatGPT/modules/models/StableLM.py b/spaces/tomzhang1019/ChatGPT/modules/models/StableLM.py deleted file mode 100644 index f4affc3699e335f1e42bf5fc8c93e92a41d027fe..0000000000000000000000000000000000000000 --- a/spaces/tomzhang1019/ChatGPT/modules/models/StableLM.py +++ /dev/null @@ -1,93 +0,0 @@ -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer -import time -import numpy as np -from torch.nn import functional as F -import os -from .base_model import BaseLLMModel -from threading import Thread - -STABLELM_MODEL = None -STABLELM_TOKENIZER = None - - -class StopOnTokens(StoppingCriteria): - def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: - stop_ids = [50278, 50279, 50277, 1, 0] - for stop_id in stop_ids: - if input_ids[0][-1] == stop_id: - return True - return False - - -class StableLM_Client(BaseLLMModel): - def __init__(self, model_name, user_name="") -> None: - super().__init__(model_name=model_name, user=user_name) - global STABLELM_MODEL, STABLELM_TOKENIZER - print(f"Starting to load StableLM to memory") - if model_name == "StableLM": - model_name = "stabilityai/stablelm-tuned-alpha-7b" - else: - model_name = f"models/{model_name}" - if STABLELM_MODEL is None: - STABLELM_MODEL = AutoModelForCausalLM.from_pretrained( - model_name, torch_dtype=torch.float16).cuda() - if STABLELM_TOKENIZER is None: - STABLELM_TOKENIZER = 
AutoTokenizer.from_pretrained(model_name) - self.generator = pipeline( - 'text-generation', model=STABLELM_MODEL, tokenizer=STABLELM_TOKENIZER, device=0) - print(f"Sucessfully loaded StableLM to the memory") - self.system_prompt = """StableAssistant -- StableAssistant is A helpful and harmless Open Source AI Language Model developed by Stability and CarperAI. -- StableAssistant is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user. -- StableAssistant is more than just an information source, StableAssistant is also able to write poetry, short stories, and make jokes. -- StableAssistant will refuse to participate in anything that could harm a human.""" - self.max_generation_token = 1024 - self.top_p = 0.95 - self.temperature = 1.0 - - def _get_stablelm_style_input(self): - history = self.history + [{"role": "assistant", "content": ""}] - print(history) - messages = self.system_prompt + \ - "".join(["".join(["<|USER|>"+history[i]["content"], "<|ASSISTANT|>"+history[i + 1]["content"]]) - for i in range(0, len(history), 2)]) - return messages - - def _generate(self, text, bad_text=None): - stop = StopOnTokens() - result = self.generator(text, max_new_tokens=self.max_generation_token, num_return_sequences=1, num_beams=1, do_sample=True, - temperature=self.temperature, top_p=self.top_p, top_k=1000, stopping_criteria=StoppingCriteriaList([stop])) - return result[0]["generated_text"].replace(text, "") - - def get_answer_at_once(self): - messages = self._get_stablelm_style_input() - return self._generate(messages), len(messages) - - def get_answer_stream_iter(self): - stop = StopOnTokens() - messages = self._get_stablelm_style_input() - - # model_inputs = tok([messages], return_tensors="pt")['input_ids'].cuda()[:, :4096-1024] - model_inputs = STABLELM_TOKENIZER( - [messages], return_tensors="pt").to("cuda") - streamer = TextIteratorStreamer( - STABLELM_TOKENIZER, timeout=10., skip_prompt=True, skip_special_tokens=True) - generate_kwargs = dict( - model_inputs, - streamer=streamer, - max_new_tokens=self.max_generation_token, - do_sample=True, - top_p=self.top_p, - top_k=1000, - temperature=self.temperature, - num_beams=1, - stopping_criteria=StoppingCriteriaList([stop]) - ) - t = Thread(target=STABLELM_MODEL.generate, kwargs=generate_kwargs) - t.start() - - partial_text = "" - for new_text in streamer: - partial_text += new_text - yield partial_text diff --git a/spaces/touchscale/White-box-Cartoonization/README.md b/spaces/touchscale/White-box-Cartoonization/README.md deleted file mode 100644 index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000 --- a/spaces/touchscale/White-box-Cartoonization/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -python_version: 3.7 -title: White Box Cartoonization -emoji: 📚 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hylee/White-box-Cartoonization ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/trttung1610/musicgen/audiocraft/grids/diffusion/4_bands_base_32khz.py b/spaces/trttung1610/musicgen/audiocraft/grids/diffusion/4_bands_base_32khz.py deleted file mode 100644 index f7e67bcc89dd0c8e50d770e600b55f179fe19588..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/grids/diffusion/4_bands_base_32khz.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. 
and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Training of the 4 diffusion models described in -"From Discrete Tokens to High-Fidelity Audio Using Multi-Band Diffusion" -(paper link). -""" - -from ._explorers import DiffusionExplorer - - -@DiffusionExplorer -def explorer(launcher): - launcher.slurm_(gpus=4, partition='learnfair') - - launcher.bind_({'solver': 'diffusion/default', - 'dset': 'internal/music_10k_32khz'}) - - with launcher.job_array(): - launcher({'filter.use': True, 'filter.idx_band': 0, "processor.use": False, 'processor.power_std': 0.4}) - launcher({'filter.use': True, 'filter.idx_band': 1, "processor.use": False, 'processor.power_std': 0.4}) - launcher({'filter.use': True, 'filter.idx_band': 2, "processor.use": True, 'processor.power_std': 0.4}) - launcher({'filter.use': True, 'filter.idx_band': 3, "processor.use": True, 'processor.power_std': 0.75}) diff --git a/spaces/tuan2010/DocumentGPT/utils.py b/spaces/tuan2010/DocumentGPT/utils.py deleted file mode 100644 index 0cdd0dbad68e08fc98178e25890585f64ba44ebd..0000000000000000000000000000000000000000 --- a/spaces/tuan2010/DocumentGPT/utils.py +++ /dev/null @@ -1,27 +0,0 @@ - -def split_text(text: str, chunk_size: int = 150, chunk_overlap: int = 25) -> list: - """ - Splits a long string into chunks with a specified size and overlap. - - Args: - text (str): The long string to be split. - chunk_size (int): The desired size of each chunk. - chunk_overlap (int): The desired overlap between adjacent chunks. - - Returns: - A list of string chunks. - """ - words = text.split(" ") - chunks = [] - start = 0 - end = chunk_size - - while end <= len(words): - chunks.append(" ".join(words[start:end])) - start += chunk_size - chunk_overlap - end += chunk_size - chunk_overlap - - if end > len(words) and start < len(words): - chunks.append(" ".join(words[start:])) - - return chunks \ No newline at end of file diff --git a/spaces/unity/ML-Agents-SoccerTwos/index.html b/spaces/unity/ML-Agents-SoccerTwos/index.html deleted file mode 100644 index 8f0c125792fd357eb0bc4a72524d8eef6c361153..0000000000000000000000000000000000000000 --- a/spaces/unity/ML-Agents-SoccerTwos/index.html +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - SoccerTwos - - - - -
            - -
            - - - - - diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/11 Year Old Girls Pussy Pics.rar.md b/spaces/usbethFlerru/sovits-modelsV2/example/11 Year Old Girls Pussy Pics.rar.md deleted file mode 100644 index 0c237931b2e7ba0e8dd77f4aa3631ad5757be25c..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/11 Year Old Girls Pussy Pics.rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

            11 Year Old Girls Pussy Pics.rar


            Download > https://urlcod.com/2uyVTx



            -
            - aaccfb2cb3
            -
            -
            -

            diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Candydoll Tv Fotos.md b/spaces/usbethFlerru/sovits-modelsV2/example/Candydoll Tv Fotos.md deleted file mode 100644 index 3566edcedaee8ebef6aeabf90f0a33c4589b80a7..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Candydoll Tv Fotos.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Candydoll Tv Fotos


            Download File ————— https://urlcod.com/2uyXaF



            -
            -Encontre as fotografias de stock e imagens de notícias editoriais de Natasha ... Signed photos, posters & more for actors, musicians, singers, TV stars and other ... Comments Off on CANDYDOLL CANDYDOLLTV - NATASHA C - SET 19 - 75P. 1fdad05405
            -
            -
            -

            diff --git a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/javascript/deforum.js b/spaces/user238921933/stable-diffusion-webui/extensions/deforum/javascript/deforum.js deleted file mode 100644 index 889e76f39c937d9eb3b602aa5510d8ac637f5e3c..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/javascript/deforum.js +++ /dev/null @@ -1,21 +0,0 @@ -function submit_deforum(){ - // alert('Hello, Deforum!') - rememberGallerySelection('deforum_gallery') - showSubmitButtons('deforum', false) - - var id = randomId() - requestProgress(id, gradioApp().getElementById('deforum_gallery_container'), gradioApp().getElementById('deforum_gallery'), function(){ - showSubmitButtons('deforum', true) - }) - - var res = create_submit_args(arguments) - - res[0] = id - // res[1] = get_tab_index('deforum') - - return res -} - -onUiUpdate(function(){ - check_gallery('deforum_gallery') -}) diff --git a/spaces/utensil/model-memory-usage/README.md b/spaces/utensil/model-memory-usage/README.md deleted file mode 100644 index c11d74e1bad6720036ae3b9661c834cbdd7f226d..0000000000000000000000000000000000000000 --- a/spaces/utensil/model-memory-usage/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Model Memory Utility -emoji: 🚀 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hf-accelerate/model-memory-usage ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/overrides/partials/comments.html b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/overrides/partials/comments.html deleted file mode 100644 index ff1455b25eac59b84f260561ce177cf33dde8d33..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/overrides/partials/comments.html +++ /dev/null @@ -1,50 +0,0 @@ -{% if page.meta.comments %} -

            {{ lang.t("meta.comments") }}

            - - - - - - -{% endif %} diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/v8/pose/val.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/v8/pose/val.md deleted file mode 100644 index c26c255e3010b579ee0734431fdf22897b458bdb..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/v8/pose/val.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Ensure proper human poses in images with YOLOv8 Pose Validation, part of the Ultralytics YOLO v8 suite. -keywords: PoseValidator, Ultralytics YOLO, object detection, pose analysis, validation ---- - -## PoseValidator ---- -### ::: ultralytics.yolo.v8.pose.val.PoseValidator -

            - -## val ---- -### ::: ultralytics.yolo.v8.pose.val.val -

            diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/train_custom_data.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/train_custom_data.md deleted file mode 100644 index ce11a95f6973306a743bff294545c55b696f2b3a..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/train_custom_data.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -comments: true -description: Train your custom dataset with YOLOv5. Learn to collect, label and annotate images, and train and deploy models. Get started now. -keywords: YOLOv5, train custom dataset, object detection, artificial intelligence, deep learning, computer vision ---- - -📚 This guide explains how to train your own **custom dataset** with [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. -UPDATED 7 June 2023. - -## Before You Start - -Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). - -```bash -git clone https://github.com/ultralytics/yolov5 # clone -cd yolov5 -pip install -r requirements.txt # install -``` - -## Train On Custom Data - - - -
            -
            - -Creating a custom model to detect your objects is an iterative process of collecting and organizing images, labeling your objects of interest, training a model, deploying it into the wild to make predictions, and then using that deployed model to collect examples of edge cases to repeat and improve. - -### 1. Create Dataset - -YOLOv5 models must be trained on labelled data in order to learn classes of objects in that data. There are two options for creating your dataset before you start training: - -
            -Use Roboflow to create your dataset in YOLO format - -!!! warning - - Roboflow users can use Ultralytics under the [AGPL license](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) or procure an [Enterprise license](https://ultralytics.com/license) directly from Ultralytics. Be aware that Roboflow does **not** provide Ultralytics licenses, and it is the responsibility of the user to ensure appropriate licensing. - -### 1.1 Collect Images - -Your model will learn by example. Training on images similar to the ones it will see in the wild is of the utmost importance. Ideally, you will collect a wide variety of images from the same configuration (camera, angle, lighting, etc.) as you will ultimately deploy your project. - -If this is not possible, you can start from [a public dataset](https://universe.roboflow.com/?ref=ultralytics) to train your initial model and then [sample images from the wild during inference](https://blog.roboflow.com/computer-vision-active-learning-tips/?ref=ultralytics) to improve your dataset and model iteratively. - -### 1.2 Create Labels - -Once you have collected images, you will need to annotate the objects of interest to create a ground truth for your model to learn from. - -

            - -[Roboflow Annotate](https://roboflow.com/annotate?ref=ultralytics) is a simple -web-based tool for managing and labeling your images with your team and exporting -them in [YOLOv5's annotation format](https://roboflow.com/formats/yolov5-pytorch-txt?ref=ultralytics). - -### 1.3 Prepare Dataset for YOLOv5 - -Whether you [label your images with Roboflow](https://roboflow.com/annotate?ref=ultralytics) or not, you can use it to convert your dataset into YOLO format, create a YOLOv5 YAML configuration file, and host it for importing into your training script. - -[Create a free Roboflow account](https://app.roboflow.com/?model=yolov5&ref=ultralytics) -and upload your dataset to a `Public` workspace, label any unannotated images, -then generate and export a version of your dataset in `YOLOv5 Pytorch` format. - -Note: YOLOv5 does online augmentation during training, so we do not recommend -applying any augmentation steps in Roboflow for training with YOLOv5. But we -recommend applying the following preprocessing steps: - -

            - -* **Auto-Orient** - to strip EXIF orientation from your images. -* **Resize (Stretch)** - to the square input size of your model (640x640 is the YOLOv5 default). - -Generating a version will give you a point in time snapshot of your dataset so -you can always go back and compare your future model training runs against it, -even if you add more images or change its configuration later. - -
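            If you would rather apply these two preprocessing steps locally instead of in Roboflow, a minimal sketch with Pillow could look like the one below; the `preprocess` helper and the file paths are placeholders for illustration, not part of the repository.

```python
from pathlib import Path
from PIL import Image, ImageOps

def preprocess(src: str, dst: str, size: int = 640) -> None:
    """Auto-orient an image and stretch-resize it to a square model input."""
    img = Image.open(src)
    img = ImageOps.exif_transpose(img)   # Auto-Orient: bake in and strip EXIF rotation
    img = img.resize((size, size))       # Resize (Stretch) to the square input size
    Path(dst).parent.mkdir(parents=True, exist_ok=True)
    img.save(dst)

# preprocess("raw/im0.jpg", "processed/im0.jpg")
```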

            - -Export in `YOLOv5 Pytorch` format, then copy the snippet into your training -script or notebook to download your dataset. - -

            - -Now continue with `2. Select a Model`. -
            - -
            -Or manually prepare your dataset - -### 1.1 Create dataset.yaml - -[COCO128](https://www.kaggle.com/ultralytics/coco128) is an example small tutorial dataset composed of the first 128 images in [COCO](http://cocodataset.org/#home) train2017. These same 128 images are used for both training and validation to verify our training pipeline is capable of overfitting. [data/coco128.yaml](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), shown below, is the dataset config file that defines 1) the dataset root directory `path` and relative paths to `train` / `val` / `test` image directories (or *.txt files with image paths) and 2) a class `names` dictionary: - -```yaml -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/coco128 # dataset root dir -train: images/train2017 # train images (relative to 'path') 128 images -val: images/train2017 # val images (relative to 'path') 128 images -test: # test images (optional) - -# Classes (80 COCO classes) -names: - 0: person - 1: bicycle - 2: car - ... - 77: teddy bear - 78: hair drier - 79: toothbrush -``` - -### 1.2 Create Labels - -After using an annotation tool to label your images, export your labels to **YOLO format**, with one `*.txt` file per image (if no objects in image, no `*.txt` file is required). The `*.txt` file specifications are: - -- One row per object -- Each row is `class x_center y_center width height` format. -- Box coordinates must be in **normalized xywh** format (from 0 - 1). If your boxes are in pixels, divide `x_center` and `width` by image width, and `y_center` and `height` by image height. -- Class numbers are zero-indexed (start from 0). - -

            - -The label file corresponding to the above image contains 2 persons (class `0`) and a tie (class `27`): - -
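            As a concrete illustration of this label format, a small sketch that turns a pixel-space box into one YOLO label line might look like the following; the `to_yolo_line` helper and the example numbers are hypothetical, not taken from the repository.

```python
def to_yolo_line(cls: int, box: tuple, img_w: int, img_h: int) -> str:
    """Convert (x_min, y_min, x_max, y_max) in pixels to 'class x_center y_center width height'."""
    x_min, y_min, x_max, y_max = box
    x_c = (x_min + x_max) / 2 / img_w    # normalize x by image width
    y_c = (y_min + y_max) / 2 / img_h    # normalize y by image height
    w = (x_max - x_min) / img_w
    h = (y_max - y_min) / img_h
    return f"{cls} {x_c:.6f} {y_c:.6f} {w:.6f} {h:.6f}"

# A person (class 0) spanning pixels (100, 120)-(300, 440) in a 640x480 image:
# to_yolo_line(0, (100, 120, 300, 440), 640, 480) -> "0 0.312500 0.583333 0.312500 0.666667"
```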

            - -### 1.3 Organize Directories - -Organize your train and val images and labels according to the example below. YOLOv5 assumes `/coco128` is inside a `/datasets` directory **next to** the `/yolov5` directory. **YOLOv5 locates labels automatically for each image** by replacing the last instance of `/images/` in each image path with `/labels/`. For example: - -```bash -../datasets/coco128/images/im0.jpg # image -../datasets/coco128/labels/im0.txt # label -``` - -
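            To make that path convention explicit, here is a rough sketch of the substitution; YOLOv5 performs this mapping internally, and the `img_to_label_path` helper below is only illustrative.

```python
import os

def img_to_label_path(img_path: str) -> str:
    """Map an image path to its label path by swapping the last /images/ for /labels/."""
    head, sep, tail = img_path.rpartition("/images/")   # only the last occurrence is replaced
    if not sep:
        raise ValueError(f"no /images/ component in {img_path!r}")
    return head + "/labels/" + os.path.splitext(tail)[0] + ".txt"

# img_to_label_path("../datasets/coco128/images/im0.jpg")
# -> "../datasets/coco128/labels/im0.txt"
```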

            -
            - -### 2. Select a Model - -Select a pretrained model to start training from. Here we select [YOLOv5s](https://github.com/ultralytics/yolov5/blob/master/models/yolov5s.yaml), the second-smallest and fastest model available. See our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints) for a full comparison of all models. - -

            YOLOv5 Models

            - -### 3. Train - -Train a YOLOv5s model on COCO128 by specifying dataset, batch-size, image size and either pretrained `--weights yolov5s.pt` (recommended), or randomly initialized `--weights '' --cfg yolov5s.yaml` (not recommended). Pretrained weights are auto-downloaded from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). - -```bash -python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt -``` - -!!! tip "Tip" - - 💡 Add `--cache ram` or `--cache disk` to speed up training (requires significant RAM/disk resources). - -!!! tip "Tip" - - 💡 Always train from a local dataset. Mounted or network drives like Google Drive will be very slow. - -All training results are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc. For more details see the Training section of our tutorial notebook. Open In Colab Open In Kaggle - -### 4. Visualize - -#### Comet Logging and Visualization 🌟 NEW - -[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! - -Getting started is easy: - -```shell -pip install comet_ml # 1. install -export COMET_API_KEY= # 2. paste API key -python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train -``` - -To learn more about all the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook: -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing) - -yolo-ui - -#### ClearML Logging and Automation 🌟 NEW - -[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML: - -- `pip install clearml` -- run `clearml-init` to connect to a ClearML server (**deploy your own open-source server [here](https://github.com/allegroai/clearml-server)**, or use our free hosted server [here](https://cutt.ly/yolov5-notebook-clearml)) - -You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers). - -You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details! 
- - -ClearML Experiment Management UI - -#### Local Logging - -Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. - -This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. - -Local logging results - -Results file `results.csv` is updated after each epoch, and then plotted as `results.png` (below) after training completes. You can also plot any `results.csv` file manually: - -```python -from utils.plots import plot_results - -plot_results('path/to/results.csv') # plot 'results.csv' as 'results.png' -``` - -

            results.png

            - -## Next Steps - -Once your model is trained you can use your best checkpoint `best.pt` to: - -* Run [CLI](https://github.com/ultralytics/yolov5#quick-start-examples) or [Python](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference on new images and videos -* [Validate](https://github.com/ultralytics/yolov5/blob/master/val.py) accuracy on train, val and test splits -* [Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) to TensorFlow, Keras, ONNX, TFlite, TF.js, CoreML and TensorRT formats -* [Evolve](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) hyperparameters to improve performance -* [Improve](https://docs.roboflow.com/adding-data/upload-api?ref=ultralytics) your model by sampling real-world images and adding them to your dataset - -## Environments - -YOLOv5 is designed to be run in the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) -- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls - -## Status - -YOLOv5 CI - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
\ No newline at end of file diff --git a/spaces/valhalla/glide-text2im/glide_text2im/clip/attention.py b/spaces/valhalla/glide-text2im/glide_text2im/clip/attention.py deleted file mode 100644 index 33775913e5cd604faea084190b1c218f34d908ac..0000000000000000000000000000000000000000 --- a/spaces/valhalla/glide-text2im/glide_text2im/clip/attention.py +++ /dev/null @@ -1,179 +0,0 @@ -import math -from abc import ABC, abstractmethod -from itertools import product -from typing import Any, Optional - -import attr -import numpy as np -import torch - - -@attr.s -class AttentionMask(ABC): - query_context_size: int = attr.ib(validator=lambda i, a, x: x >= 1) # type: ignore - key_context_size: int = attr.ib(validator=lambda i, a, x: x >= 1) # type: ignore - block_size: int = attr.ib(validator=lambda i, a, x: x >= 1) # type: ignore - n_head: int = attr.ib(validator=lambda i, a, x: x >= 1) # type: ignore - is_head_specific: bool = attr.ib(default=False) - n_query_pad: int = attr.ib(default=0) - n_key_pad: int = attr.ib(default=0) - - def __attrs_post_init__(self) -> None: - if self.query_context_size % self.block_size != 0: - raise ValueError() - if self.key_context_size % self.block_size != 0: - raise ValueError() - if self.n_query_pad >= self.query_context_size: - raise ValueError() - if self.n_key_pad >= self.key_context_size: - raise ValueError() - - self.n_query_block = self.query_context_size // self.block_size - self.n_key_block = self.key_context_size // self.block_size - self.first_pad_query_block_idx = self.n_query_block - int( - math.ceil(self.n_query_pad / self.block_size) - ) - self.first_pad_key_block_idx = self.n_key_block - int( - math.ceil(self.n_key_pad / self.block_size) - ) - - def _make_global_layout(self) -> None: - if not self.is_head_specific: - m = np.ones([self.n_query_block, self.n_key_block], dtype=np.bool) - r = product(*[range(n) for n in m.shape]) - - for qb, kb in r: - m[qb, kb] = np.any(self.block_layout(None, 0, qb, kb, 0)) - else: - m = np.ones([self.n_head, self.n_query_block, self.n_key_block], dtype=np.bool) - r = product(*[range(n) for n in m.shape]) - - for h, qb, kb in r: - m[h, qb, kb] = np.any(self.block_layout(None, h, qb, kb, 0)) - - self.global_layout = m - - @abstractmethod - def _block_layout( - self, blk_shape: Any, head_idx: int, query_idx: int, key_idx: int, blk_idx: int - ) -> np.ndarray: - raise NotImplementedError() - - def block_layout( - self, blk_shape: Any, head_idx: int, query_idx: int, key_idx: int, blk_idx: int - ) -> np.ndarray: - """ - `query_idx`, `key_idx` are block-level, zero-based indices. 
- """ - - m = np.ones([self.block_size, self.block_size], dtype=np.bool) - - if query_idx >= self.first_pad_query_block_idx: - n_pad = min( - self.block_size, - (query_idx + 1) * self.block_size - (self.query_context_size - self.n_query_pad), - ) - assert n_pad > 0 - m[self.block_size - n_pad :] = False - if key_idx >= self.first_pad_key_block_idx: - n_pad = min( - self.block_size, - (key_idx + 1) * self.block_size - (self.key_context_size - self.n_key_pad), - ) - assert n_pad > 0 - m[:, self.block_size - n_pad :] = False - - return m & self._block_layout(blk_shape, head_idx, query_idx, key_idx, blk_idx) - - -@attr.s -class DenseAttentionMask(AttentionMask): - def __attrs_post_init__(self) -> None: - super().__attrs_post_init__() - - self.global_layout = np.ones([self.n_query_block, self.n_key_block], dtype=np.bool) - n_zero_query_blocks = self.n_query_pad // self.block_size - n_zero_key_blocks = self.n_key_pad // self.block_size - self.global_layout[self.n_query_block - n_zero_query_blocks :] = False - self.global_layout[:, self.n_key_block - n_zero_key_blocks :] = False - - def _block_layout( - self, blk_shape: Any, head_idx: int, query_idx: int, key_idx: int, blk_idx: int - ) -> np.ndarray: - return np.ones([self.block_size, self.block_size], dtype=np.bool) - - -@attr.s -class DenseCausalAttentionMask(AttentionMask): - def __attrs_post_init__(self) -> None: - super().__attrs_post_init__() - - self.global_layout = np.tril(np.ones([self.n_query_block, self.n_key_block], dtype=np.bool)) - n_zero_query_blocks = self.n_query_pad // self.block_size - n_zero_key_blocks = self.n_key_pad // self.block_size - self.global_layout[self.n_query_block - n_zero_query_blocks :] = False - self.global_layout[:, self.n_key_block - n_zero_key_blocks :] = False - - def _block_layout( - self, blk_shape: Any, head_idx: int, query_idx: int, key_idx: int, blk_idx: int - ) -> np.ndarray: - if query_idx > key_idx: - return np.ones(2 * [self.block_size], dtype=np.bool) - elif query_idx < key_idx: - return np.zeros(2 * [self.block_size], dtype=np.bool) - else: - return np.tril(np.ones(2 * [self.block_size], dtype=np.bool)) - - -@attr.s(eq=False, repr=False) -class AttentionInfo: - n_heads: int = attr.ib() - ctx_blks_q: int = attr.ib() - ctx_blks_k: int = attr.ib() - block_size: int = attr.ib() - pytorch_attn_bias: Optional[torch.Tensor] = attr.ib() - - -def to_attention_info(d: AttentionMask) -> AttentionInfo: - return AttentionInfo( - n_heads=d.n_head, - ctx_blks_q=d.n_query_block, - ctx_blks_k=d.n_key_block, - block_size=d.block_size, - pytorch_attn_bias=None, - ) - - -def make_full_layout(d: AttentionMask) -> np.ndarray: - """ - Returns the `context_size x context_size` layout matrix described by `d`. If the layout is dependent on the index of - the attention head, a `attention_head x context_size x context_size` layout matrix is returned instead. 
- """ - - if not d.is_head_specific: - u = np.reshape(d.global_layout, [d.n_query_block, d.n_key_block, 1, 1]) - r = product(range(d.n_query_block), range(d.n_key_block)) - v = np.array([d.block_layout(None, 0, i, j, 0) for i, j in r]) - v = np.reshape(v, [d.n_query_block, d.n_key_block, d.block_size, d.block_size]) - - w = u * v - w = np.transpose(w, [0, 2, 1, 3]) - w = np.reshape(w, [d.query_context_size, d.key_context_size]) - return w - else: - if len(d.global_layout.shape) == 2: - u = np.reshape(d.global_layout, [1, d.n_query_block, d.n_key_block, 1, 1]) - u = np.tile(u, [d.n_head, 1, 1, 1, 1]) - elif len(d.global_layout.shape) == 3: - u = np.reshape(d.global_layout, [d.n_head, d.n_query_block, d.n_key_block, 1, 1]) - else: - raise RuntimeError() - - s = product(range(d.n_head), range(d.n_query_block), range(d.n_key_block)) - v = np.array([d.block_layout(None, i, j, k, 0) for i, j, k in s]) - v = np.reshape(v, [d.n_head, d.n_query_block, d.n_key_block, d.block_size, d.block_size]) - - w = u * v - w = np.transpose(w, [0, 1, 3, 2, 4]) - w = np.reshape(w, [d.n_head, d.query_context_size, d.key_context_size]) - return w diff --git a/spaces/viait/multi-fusion-sd-dalle/README.md b/spaces/viait/multi-fusion-sd-dalle/README.md deleted file mode 100644 index a6adf57f1bc816dbca595aebaed74486924f3e06..0000000000000000000000000000000000000000 --- a/spaces/viait/multi-fusion-sd-dalle/README.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: MultiFusion — Stable Diffusion & Dall•E-2 -metaTitle: DALL·E mini by craiyon.com on Hugging Face -emoji: 💧MF -colorFrom: yellow -colorTo: green -sdk: static -pinned: true ---- diff --git a/spaces/video-p2p-library/Video-P2P-Demo/app_upload.py b/spaces/video-p2p-library/Video-P2P-Demo/app_upload.py deleted file mode 100644 index f839c0c33c1ab8a43bc269ede0af920e61ef76cc..0000000000000000000000000000000000000000 --- a/spaces/video-p2p-library/Video-P2P-Demo/app_upload.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import pathlib - -import gradio as gr -import slugify - -from constants import MODEL_LIBRARY_ORG_NAME, UploadTarget -from uploader import Uploader -from utils import find_exp_dirs - - -class ModelUploader(Uploader): - def upload_model( - self, - folder_path: str, - repo_name: str, - upload_to: str, - private: bool, - delete_existing_repo: bool, - input_token: str | None = None, - ) -> str: - if not folder_path: - raise ValueError - if not repo_name: - repo_name = pathlib.Path(folder_path).name - repo_name = slugify.slugify(repo_name) - - if upload_to == UploadTarget.PERSONAL_PROFILE.value: - organization = '' - elif upload_to == UploadTarget.MODEL_LIBRARY.value: - organization = MODEL_LIBRARY_ORG_NAME - else: - raise ValueError - - return self.upload(folder_path, - repo_name, - organization=organization, - private=private, - delete_existing_repo=delete_existing_repo, - input_token=input_token) - - -def load_local_model_list() -> dict: - choices = find_exp_dirs() - return gr.update(choices=choices, value=choices[0] if choices else None) - - -def create_upload_demo(hf_token: str | None) -> gr.Blocks: - uploader = ModelUploader(hf_token) - model_dirs = find_exp_dirs() - - with gr.Blocks() as demo: - with gr.Box(): - gr.Markdown('Local Models') - reload_button = gr.Button('Reload Model List') - model_dir = gr.Dropdown( - label='Model names', - choices=model_dirs, - value=model_dirs[0] if model_dirs else None) - with gr.Box(): - gr.Markdown('Upload Settings') - with gr.Row(): - use_private_repo = 
gr.Checkbox(label='Private', value=True) - delete_existing_repo = gr.Checkbox( - label='Delete existing repo of the same name', value=False) - upload_to = gr.Radio(label='Upload to', - choices=[_.value for _ in UploadTarget], - value=UploadTarget.MODEL_LIBRARY.value) - model_name = gr.Textbox(label='Model Name') - input_token = gr.Text(label='Hugging Face Write Token', - placeholder='', - visible=False if hf_token else True) - upload_button = gr.Button('Upload') - gr.Markdown(f''' - - You can upload your trained model to your personal profile (i.e. https://huggingface.co/{{your_username}}/{{model_name}}) or to the public [Video-P2P Library](https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}) (i.e. https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}/{{model_name}}). - ''') - with gr.Box(): - gr.Markdown('Output message') - output_message = gr.Markdown() - - reload_button.click(fn=load_local_model_list, - inputs=None, - outputs=model_dir) - upload_button.click(fn=uploader.upload_model, - inputs=[ - model_dir, - model_name, - upload_to, - use_private_repo, - delete_existing_repo, - input_token, - ], - outputs=output_message) - - return demo - - -if __name__ == '__main__': - import os - - hf_token = os.getenv('HF_TOKEN') - demo = create_upload_demo(hf_token) - demo.queue(max_size=1).launch(share=False) diff --git a/spaces/vumichien/Whisper_speaker_diarization/app.py b/spaces/vumichien/Whisper_speaker_diarization/app.py deleted file mode 100644 index ef334ec7fa10419f6f382a944f305f8f732f3cf8..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Whisper_speaker_diarization/app.py +++ /dev/null @@ -1,473 +0,0 @@ -# import whisper -from faster_whisper import WhisperModel -import datetime -import subprocess -import gradio as gr -from pathlib import Path -import pandas as pd -import re -import time -import os -import numpy as np -from sklearn.cluster import AgglomerativeClustering -from sklearn.metrics import silhouette_score - -from pytube import YouTube -import yt_dlp -import torch -import pyannote.audio -from pyannote.audio.pipelines.speaker_verification import PretrainedSpeakerEmbedding -from pyannote.audio import Audio -from pyannote.core import Segment - -from gpuinfo import GPUInfo - -import wave -import contextlib -from transformers import pipeline -import psutil - -whisper_models = ["tiny", "base", "small", "medium", "large-v1", "large-v2"] -source_languages = { - "en": "English", - "zh": "Chinese", - "de": "German", - "es": "Spanish", - "ru": "Russian", - "ko": "Korean", - "fr": "French", - "ja": "Japanese", - "pt": "Portuguese", - "tr": "Turkish", - "pl": "Polish", - "ca": "Catalan", - "nl": "Dutch", - "ar": "Arabic", - "sv": "Swedish", - "it": "Italian", - "id": "Indonesian", - "hi": "Hindi", - "fi": "Finnish", - "vi": "Vietnamese", - "he": "Hebrew", - "uk": "Ukrainian", - "el": "Greek", - "ms": "Malay", - "cs": "Czech", - "ro": "Romanian", - "da": "Danish", - "hu": "Hungarian", - "ta": "Tamil", - "no": "Norwegian", - "th": "Thai", - "ur": "Urdu", - "hr": "Croatian", - "bg": "Bulgarian", - "lt": "Lithuanian", - "la": "Latin", - "mi": "Maori", - "ml": "Malayalam", - "cy": "Welsh", - "sk": "Slovak", - "te": "Telugu", - "fa": "Persian", - "lv": "Latvian", - "bn": "Bengali", - "sr": "Serbian", - "az": "Azerbaijani", - "sl": "Slovenian", - "kn": "Kannada", - "et": "Estonian", - "mk": "Macedonian", - "br": "Breton", - "eu": "Basque", - "is": "Icelandic", - "hy": "Armenian", - "ne": "Nepali", - "mn": "Mongolian", - "bs": "Bosnian", - "kk": "Kazakh", - "sq": "Albanian", - "sw": "Swahili", - 
"gl": "Galician", - "mr": "Marathi", - "pa": "Punjabi", - "si": "Sinhala", - "km": "Khmer", - "sn": "Shona", - "yo": "Yoruba", - "so": "Somali", - "af": "Afrikaans", - "oc": "Occitan", - "ka": "Georgian", - "be": "Belarusian", - "tg": "Tajik", - "sd": "Sindhi", - "gu": "Gujarati", - "am": "Amharic", - "yi": "Yiddish", - "lo": "Lao", - "uz": "Uzbek", - "fo": "Faroese", - "ht": "Haitian creole", - "ps": "Pashto", - "tk": "Turkmen", - "nn": "Nynorsk", - "mt": "Maltese", - "sa": "Sanskrit", - "lb": "Luxembourgish", - "my": "Myanmar", - "bo": "Tibetan", - "tl": "Tagalog", - "mg": "Malagasy", - "as": "Assamese", - "tt": "Tatar", - "haw": "Hawaiian", - "ln": "Lingala", - "ha": "Hausa", - "ba": "Bashkir", - "jw": "Javanese", - "su": "Sundanese", -} - -source_language_list = [key[0] for key in source_languages.items()] - -MODEL_NAME = "vumichien/whisper-medium-jp" -lang = "ja" - -device = 0 if torch.cuda.is_available() else "cpu" -pipe = pipeline( - task="automatic-speech-recognition", - model=MODEL_NAME, - chunk_length_s=30, - device=device, -) -os.makedirs('output', exist_ok=True) -pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe") - -embedding_model = PretrainedSpeakerEmbedding( - "speechbrain/spkrec-ecapa-voxceleb", - device=torch.device("cuda" if torch.cuda.is_available() else "cpu")) - -def transcribe(microphone, file_upload): - warn_output = "" - if (microphone is not None) and (file_upload is not None): - warn_output = ( - "WARNING: You've uploaded an audio file and used the microphone. " - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - ) - - elif (microphone is None) and (file_upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - file = microphone if microphone is not None else file_upload - - text = pipe(file)["text"] - - return warn_output + text - -def _return_yt_html_embed(yt_url): - video_id = yt_url.split("?v=")[-1] - HTML_str = ( - f'
            ' - "
            " - ) - return HTML_str - -def yt_transcribe(yt_url): - # yt = YouTube(yt_url) - # html_embed_str = _return_yt_html_embed(yt_url) - # stream = yt.streams.filter(only_audio=True)[0] - # stream.download(filename="audio.mp3") - - ydl_opts = { - 'format': 'bestvideo*+bestaudio/best', - 'postprocessors': [{ - 'key': 'FFmpegExtractAudio', - 'preferredcodec': 'mp3', - 'preferredquality': '192', - }], - 'outtmpl':'audio.%(ext)s', - } - - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - ydl.download([yt_url]) - - text = pipe("audio.mp3")["text"] - return html_embed_str, text - -def convert_time(secs): - return datetime.timedelta(seconds=round(secs)) - -def get_youtube(video_url): - # yt = YouTube(video_url) - # abs_video_path = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download() - - ydl_opts = { - 'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best', - } - - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - info = ydl.extract_info(video_url, download=False) - abs_video_path = ydl.prepare_filename(info) - ydl.process_info(info) - - print("Success download video") - print(abs_video_path) - return abs_video_path - -def speech_to_text(video_file_path, selected_source_lang, whisper_model, num_speakers): - """ - # Transcribe youtube link using OpenAI Whisper - 1. Using Open AI's Whisper model to seperate audio into segments and generate transcripts. - 2. Generating speaker embeddings for each segments. - 3. Applying agglomerative clustering on the embeddings to identify the speaker for each segment. - - Speech Recognition is based on models from OpenAI Whisper https://github.com/openai/whisper - Speaker diarization model and pipeline from by https://github.com/pyannote/pyannote-audio - """ - - # model = whisper.load_model(whisper_model) - # model = WhisperModel(whisper_model, device="cuda", compute_type="int8_float16") - model = WhisperModel(whisper_model, compute_type="int8") - time_start = time.time() - if(video_file_path == None): - raise ValueError("Error no video input") - print(video_file_path) - - try: - # Read and convert youtube video - _,file_ending = os.path.splitext(f'{video_file_path}') - print(f'file enging is {file_ending}') - audio_file = video_file_path.replace(file_ending, ".wav") - print("starting conversion to wav") - os.system(f'ffmpeg -i "{video_file_path}" -ar 16000 -ac 1 -c:a pcm_s16le "{audio_file}"') - - # Get duration - with contextlib.closing(wave.open(audio_file,'r')) as f: - frames = f.getnframes() - rate = f.getframerate() - duration = frames / float(rate) - print(f"conversion to wav ready, duration of audio file: {duration}") - - # Transcribe audio - options = dict(language=selected_source_lang, beam_size=5, best_of=5) - transcribe_options = dict(task="transcribe", **options) - segments_raw, info = model.transcribe(audio_file, **transcribe_options) - - # Convert back to original openai format - segments = [] - i = 0 - for segment_chunk in segments_raw: - chunk = {} - chunk["start"] = segment_chunk.start - chunk["end"] = segment_chunk.end - chunk["text"] = segment_chunk.text - segments.append(chunk) - i += 1 - print("transcribe audio done with fast whisper") - except Exception as e: - raise RuntimeError("Error converting video to audio") - - try: - # Create embedding - def segment_embedding(segment): - audio = Audio() - start = segment["start"] - # Whisper overshoots the end timestamp in the last segment - end = min(duration, segment["end"]) - clip = Segment(start, end) - waveform, sample_rate = 
audio.crop(audio_file, clip) - return embedding_model(waveform[None]) - - embeddings = np.zeros(shape=(len(segments), 192)) - for i, segment in enumerate(segments): - embeddings[i] = segment_embedding(segment) - embeddings = np.nan_to_num(embeddings) - print(f'Embedding shape: {embeddings.shape}') - - if num_speakers == 0: - # Find the best number of speakers - score_num_speakers = {} - - for num_speakers in range(2, 10+1): - clustering = AgglomerativeClustering(num_speakers).fit(embeddings) - score = silhouette_score(embeddings, clustering.labels_, metric='euclidean') - score_num_speakers[num_speakers] = score - best_num_speaker = max(score_num_speakers, key=lambda x:score_num_speakers[x]) - print(f"The best number of speakers: {best_num_speaker} with {score_num_speakers[best_num_speaker]} score") - else: - best_num_speaker = num_speakers - - # Assign speaker label - clustering = AgglomerativeClustering(best_num_speaker).fit(embeddings) - labels = clustering.labels_ - for i in range(len(segments)): - segments[i]["speaker"] = 'SPEAKER ' + str(labels[i] + 1) - - # Make output - objects = { - 'Start' : [], - 'End': [], - 'Speaker': [], - 'Text': [] - } - text = '' - for (i, segment) in enumerate(segments): - if i == 0 or segments[i - 1]["speaker"] != segment["speaker"]: - objects['Start'].append(str(convert_time(segment["start"]))) - objects['Speaker'].append(segment["speaker"]) - if i != 0: - objects['End'].append(str(convert_time(segments[i - 1]["end"]))) - objects['Text'].append(text) - text = '' - text += segment["text"] + ' ' - objects['End'].append(str(convert_time(segments[i - 1]["end"]))) - objects['Text'].append(text) - - time_end = time.time() - time_diff = time_end - time_start - memory = psutil.virtual_memory() - gpu_utilization, gpu_memory = GPUInfo.gpu_usage() - gpu_utilization = gpu_utilization[0] if len(gpu_utilization) > 0 else 0 - gpu_memory = gpu_memory[0] if len(gpu_memory) > 0 else 0 - system_info = f""" - *Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB.* - *Processing time: {time_diff:.5} seconds.* - *GPU Utilization: {gpu_utilization}%, GPU Memory: {gpu_memory}MiB.* - """ - save_path = "output/transcript_result.csv" - df_results = pd.DataFrame(objects) - df_results.to_csv(save_path) - return df_results, system_info, save_path - - except Exception as e: - raise RuntimeError("Error Running inference with local model", e) - - -# ---- Gradio Layout ----- -# Inspiration from https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles -video_in = gr.Video(label="Video file", mirror_webcam=False) -youtube_url_in = gr.Textbox(label="Youtube url", lines=1, interactive=True) -df_init = pd.DataFrame(columns=['Start', 'End', 'Speaker', 'Text']) -memory = psutil.virtual_memory() -selected_source_lang = gr.Dropdown(choices=source_language_list, type="value", value="en", label="Spoken language in video", interactive=True) -selected_whisper_model = gr.Dropdown(choices=whisper_models, type="value", value="base", label="Selected Whisper model", interactive=True) -number_speakers = gr.Number(precision=0, value=0, label="Input number of speakers for better results. 
If value=0, model will automatic find the best number of speakers", interactive=True) -system_info = gr.Markdown(f"*Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB*") -download_transcript = gr.File(label="Download transcript") -transcription_df = gr.DataFrame(value=df_init,label="Transcription dataframe", row_count=(0, "dynamic"), max_rows = 10, wrap=True, overflow_row_behaviour='paginate') -title = "Whisper speaker diarization" -demo = gr.Blocks(title=title) -demo.encrypt = False - - -with demo: - with gr.Tab("Whisper speaker diarization"): - gr.Markdown(''' -
            -

            Whisper speaker diarization

            - This space uses Whisper models from OpenAI with CTranslate2, a fast inference engine for Transformer models, to recognize speech (4 times faster than the original OpenAI implementation with the same accuracy), - and the ECAPA-TDNN model from SpeechBrain to encode and classify speakers -
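            As a rough sketch of how the speaker assignment works (condensed from this Space's `speech_to_text` function; the `assign_speakers` helper name is introduced here only for illustration, and `embeddings` is assumed to be the `(num_segments, 192)` array of per-segment speaker embeddings):

```python
# Minimal sketch of the speaker-assignment step, assuming `embeddings` already
# holds one 192-dimensional speaker embedding per transcript segment.
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score

def assign_speakers(embeddings: np.ndarray, num_speakers: int = 0) -> np.ndarray:
    if num_speakers == 0:
        # Try 2..10 speakers and keep the count with the best silhouette score.
        scores = {}
        for k in range(2, 11):
            labels = AgglomerativeClustering(k).fit_predict(embeddings)
            scores[k] = silhouette_score(embeddings, labels, metric='euclidean')
        num_speakers = max(scores, key=scores.get)
    # Cluster labels (0..num_speakers-1) map to "SPEAKER 1", "SPEAKER 2", ...
    return AgglomerativeClustering(num_speakers).fit_predict(embeddings)
```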
            - ''') - - with gr.Row(): - gr.Markdown(''' - ### Transcribe a YouTube link using OpenAI Whisper - ##### 1. Using OpenAI's Whisper model to separate audio into segments and generate transcripts. - ##### 2. Generating speaker embeddings for each segment. - ##### 3. Applying agglomerative clustering on the embeddings to identify the speaker for each segment. - ''') - - with gr.Row(): - gr.Markdown(''' - ### You can test with the following examples: - ''') - examples = gr.Examples(examples= - [ "https://www.youtube.com/watch?v=j7BfEzAFuYc&t=32s", - "https://www.youtube.com/watch?v=-UX0X45sYe4", - "https://www.youtube.com/watch?v=7minSgqi-Gw"], - label="Examples", inputs=[youtube_url_in]) - - - with gr.Row(): - with gr.Column(): - youtube_url_in.render() - download_youtube_btn = gr.Button("Download YouTube video") - download_youtube_btn.click(get_youtube, [youtube_url_in], [ - video_in]) - print(video_in) - - - with gr.Row(): - with gr.Column(): - video_in.render() - with gr.Column(): - gr.Markdown(''' - ##### Here you can start the transcription process. - ##### Please select the source language for transcription. - ##### You can select a range of assumed numbers of speakers. - ''') - selected_source_lang.render() - selected_whisper_model.render() - number_speakers.render() - transcribe_btn = gr.Button("Transcribe audio and diarization") - transcribe_btn.click(speech_to_text, - [video_in, selected_source_lang, selected_whisper_model, number_speakers], - [transcription_df, system_info, download_transcript] - ) - - with gr.Row(): - gr.Markdown(''' - ##### Here you will get the transcription output - ##### ''') - - - with gr.Row(): - with gr.Column(): - download_transcript.render() - transcription_df.render() - system_info.render() - gr.Markdown('''
            visitor badgeLicense: Apache 2.0
            ''') - - - - with gr.Tab("Whisper Transcribe Japanese Audio"): - gr.Markdown(f''' -
            -

            Whisper Transcribe Japanese Audio

            -
            - Transcribe long-form microphone or audio inputs with the click of a button! The fine-tuned - checkpoint {MODEL_NAME} is used to transcribe audio files of arbitrary length. - ''') - microphone = gr.inputs.Audio(source="microphone", type="filepath", optional=True) - upload = gr.inputs.Audio(source="upload", type="filepath", optional=True) - transcribe_btn = gr.Button("Transcribe Audio") - text_output = gr.Textbox() - with gr.Row(): - gr.Markdown(''' - ### You can test with the following examples: - ''') - examples = gr.Examples(examples= - [ "sample1.wav", - "sample2.wav", - ], - label="Examples", inputs=[upload]) - transcribe_btn.click(transcribe, [microphone, upload], outputs=text_output) - - with gr.Tab("Whisper Transcribe Japanese YouTube"): - gr.Markdown(f''' -
            -

            Whisper Transcribe Japanese YouTube

            -
            - Transcribe long-form YouTube videos with the click of a button! The fine-tuned checkpoint: - {MODEL_NAME} to transcribe audio files of arbitrary length. - ''') - youtube_link = gr.Textbox(label="Youtube url", lines=1, interactive=True) - yt_transcribe_btn = gr.Button("Transcribe YouTube") - text_output2 = gr.Textbox() - html_output = gr.Markdown() - yt_transcribe_btn.click(yt_transcribe, [youtube_link], outputs=[html_output, text_output2]) - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/datasets/dataset_wrappers.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/datasets/dataset_wrappers.py deleted file mode 100644 index d6a5e957ec3b44465432617cf6e8f0b86a8a5efa..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/datasets/dataset_wrappers.py +++ /dev/null @@ -1,50 +0,0 @@ -from torch.utils.data.dataset import ConcatDataset as _ConcatDataset - -from .builder import DATASETS - - -@DATASETS.register_module() -class ConcatDataset(_ConcatDataset): - """A wrapper of concatenated dataset. - - Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but - concat the group flag for image aspect ratio. - - Args: - datasets (list[:obj:`Dataset`]): A list of datasets. - """ - - def __init__(self, datasets): - super(ConcatDataset, self).__init__(datasets) - self.CLASSES = datasets[0].CLASSES - self.PALETTE = datasets[0].PALETTE - - -@DATASETS.register_module() -class RepeatDataset(object): - """A wrapper of repeated dataset. - - The length of repeated dataset will be `times` larger than the original - dataset. This is useful when the data loading time is long but the dataset - is small. Using RepeatDataset can reduce the data loading time between - epochs. - - Args: - dataset (:obj:`Dataset`): The dataset to be repeated. - times (int): Repeat times. - """ - - def __init__(self, dataset, times): - self.dataset = dataset - self.times = times - self.CLASSES = dataset.CLASSES - self.PALETTE = dataset.PALETTE - self._ori_len = len(self.dataset) - - def __getitem__(self, idx): - """Get item from original dataset.""" - return self.dataset[idx % self._ori_len] - - def __len__(self): - """The length is multiplied by ``times``""" - return self.times * self._ori_len diff --git a/spaces/wilson1/bingo/src/components/ui/alert-dialog.tsx b/spaces/wilson1/bingo/src/components/ui/alert-dialog.tsx deleted file mode 100644 index 17fec4d16510328deacc1416569173c97761ef72..0000000000000000000000000000000000000000 --- a/spaces/wilson1/bingo/src/components/ui/alert-dialog.tsx +++ /dev/null @@ -1,150 +0,0 @@ -'use client' - -import * as React from 'react' -import * as AlertDialogPrimitive from '@radix-ui/react-alert-dialog' - -import { cn } from '@/lib/utils' -import { buttonVariants } from '@/components/ui/button' - -const AlertDialog = AlertDialogPrimitive.Root - -const AlertDialogTrigger = AlertDialogPrimitive.Trigger - -const AlertDialogPortal = ({ - className, - children, - ...props -}: AlertDialogPrimitive.AlertDialogPortalProps) => ( - -
            - {children} -
            -
            -) -AlertDialogPortal.displayName = AlertDialogPrimitive.Portal.displayName - -const AlertDialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName - -const AlertDialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - -)) -AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName - -const AlertDialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
            -) -AlertDialogHeader.displayName = 'AlertDialogHeader' - -const AlertDialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
            -) -AlertDialogFooter.displayName = 'AlertDialogFooter' - -const AlertDialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName - -const AlertDialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogDescription.displayName = - AlertDialogPrimitive.Description.displayName - -const AlertDialogAction = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName - -const AlertDialogCancel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName - -export { - AlertDialog, - AlertDialogTrigger, - AlertDialogContent, - AlertDialogHeader, - AlertDialogFooter, - AlertDialogTitle, - AlertDialogDescription, - AlertDialogAction, - AlertDialogCancel -} diff --git a/spaces/xSaXx/llama2-70b-nochat/README.md b/spaces/xSaXx/llama2-70b-nochat/README.md deleted file mode 100644 index 4519d2afc6eeaacb9c2936c7050ee560ab8ef822..0000000000000000000000000000000000000000 --- a/spaces/xSaXx/llama2-70b-nochat/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Llama2 70b Nochat -emoji: 🏢 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: llama2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/inceptionv4.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/inceptionv4.py deleted file mode 100644 index b14916f140712298866c943ebdb4ebad67d72fc4..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/inceptionv4.py +++ /dev/null @@ -1,381 +0,0 @@ -from __future__ import division, absolute_import -import torch -import torch.nn as nn -import torch.utils.model_zoo as model_zoo - -__all__ = ['inceptionv4'] -""" -Code imported from https://github.com/Cadene/pretrained-models.pytorch -""" - -pretrained_settings = { - 'inceptionv4': { - 'imagenet': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth', - 'input_space': 'RGB', - 'input_size': [3, 299, 299], - 'input_range': [0, 1], - 'mean': [0.5, 0.5, 0.5], - 'std': [0.5, 0.5, 0.5], - 'num_classes': 1000 - }, - 'imagenet+background': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth', - 'input_space': 'RGB', - 'input_size': [3, 299, 299], - 'input_range': [0, 1], - 'mean': [0.5, 0.5, 0.5], - 'std': [0.5, 0.5, 0.5], - 'num_classes': 1001 - } - } -} - - -class BasicConv2d(nn.Module): - - def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): - super(BasicConv2d, self).__init__() - self.conv = nn.Conv2d( - in_planes, - out_planes, - kernel_size=kernel_size, - stride=stride, - padding=padding, - bias=False - ) # verify bias false - self.bn = nn.BatchNorm2d( - out_planes, - eps=0.001, # value found in tensorflow - momentum=0.1, # default pytorch value - affine=True - ) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = self.relu(x) - return x - - -class Mixed_3a(nn.Module): - - def __init__(self): 
- super(Mixed_3a, self).__init__() - self.maxpool = nn.MaxPool2d(3, stride=2) - self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2) - - def forward(self, x): - x0 = self.maxpool(x) - x1 = self.conv(x) - out = torch.cat((x0, x1), 1) - return out - - -class Mixed_4a(nn.Module): - - def __init__(self): - super(Mixed_4a, self).__init__() - - self.branch0 = nn.Sequential( - BasicConv2d(160, 64, kernel_size=1, stride=1), - BasicConv2d(64, 96, kernel_size=3, stride=1) - ) - - self.branch1 = nn.Sequential( - BasicConv2d(160, 64, kernel_size=1, stride=1), - BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), - BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), - BasicConv2d(64, 96, kernel_size=(3, 3), stride=1) - ) - - def forward(self, x): - x0 = self.branch0(x) - x1 = self.branch1(x) - out = torch.cat((x0, x1), 1) - return out - - -class Mixed_5a(nn.Module): - - def __init__(self): - super(Mixed_5a, self).__init__() - self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2) - self.maxpool = nn.MaxPool2d(3, stride=2) - - def forward(self, x): - x0 = self.conv(x) - x1 = self.maxpool(x) - out = torch.cat((x0, x1), 1) - return out - - -class Inception_A(nn.Module): - - def __init__(self): - super(Inception_A, self).__init__() - self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1) - - self.branch1 = nn.Sequential( - BasicConv2d(384, 64, kernel_size=1, stride=1), - BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1) - ) - - self.branch2 = nn.Sequential( - BasicConv2d(384, 64, kernel_size=1, stride=1), - BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), - BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) - ) - - self.branch3 = nn.Sequential( - nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), - BasicConv2d(384, 96, kernel_size=1, stride=1) - ) - - def forward(self, x): - x0 = self.branch0(x) - x1 = self.branch1(x) - x2 = self.branch2(x) - x3 = self.branch3(x) - out = torch.cat((x0, x1, x2, x3), 1) - return out - - -class Reduction_A(nn.Module): - - def __init__(self): - super(Reduction_A, self).__init__() - self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2) - - self.branch1 = nn.Sequential( - BasicConv2d(384, 192, kernel_size=1, stride=1), - BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1), - BasicConv2d(224, 256, kernel_size=3, stride=2) - ) - - self.branch2 = nn.MaxPool2d(3, stride=2) - - def forward(self, x): - x0 = self.branch0(x) - x1 = self.branch1(x) - x2 = self.branch2(x) - out = torch.cat((x0, x1, x2), 1) - return out - - -class Inception_B(nn.Module): - - def __init__(self): - super(Inception_B, self).__init__() - self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1) - - self.branch1 = nn.Sequential( - BasicConv2d(1024, 192, kernel_size=1, stride=1), - BasicConv2d( - 192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3) - ), - BasicConv2d( - 224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0) - ) - ) - - self.branch2 = nn.Sequential( - BasicConv2d(1024, 192, kernel_size=1, stride=1), - BasicConv2d( - 192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0) - ), - BasicConv2d( - 192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3) - ), - BasicConv2d( - 224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0) - ), - BasicConv2d( - 224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3) - ) - ) - - self.branch3 = nn.Sequential( - nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), - BasicConv2d(1024, 128, kernel_size=1, stride=1) - ) - - def forward(self, x): - x0 = self.branch0(x) 
- x1 = self.branch1(x) - x2 = self.branch2(x) - x3 = self.branch3(x) - out = torch.cat((x0, x1, x2, x3), 1) - return out - - -class Reduction_B(nn.Module): - - def __init__(self): - super(Reduction_B, self).__init__() - - self.branch0 = nn.Sequential( - BasicConv2d(1024, 192, kernel_size=1, stride=1), - BasicConv2d(192, 192, kernel_size=3, stride=2) - ) - - self.branch1 = nn.Sequential( - BasicConv2d(1024, 256, kernel_size=1, stride=1), - BasicConv2d( - 256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3) - ), - BasicConv2d( - 256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0) - ), BasicConv2d(320, 320, kernel_size=3, stride=2) - ) - - self.branch2 = nn.MaxPool2d(3, stride=2) - - def forward(self, x): - x0 = self.branch0(x) - x1 = self.branch1(x) - x2 = self.branch2(x) - out = torch.cat((x0, x1, x2), 1) - return out - - -class Inception_C(nn.Module): - - def __init__(self): - super(Inception_C, self).__init__() - - self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1) - - self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) - self.branch1_1a = BasicConv2d( - 384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1) - ) - self.branch1_1b = BasicConv2d( - 384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) - ) - - self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) - self.branch2_1 = BasicConv2d( - 384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0) - ) - self.branch2_2 = BasicConv2d( - 448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1) - ) - self.branch2_3a = BasicConv2d( - 512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1) - ) - self.branch2_3b = BasicConv2d( - 512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) - ) - - self.branch3 = nn.Sequential( - nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), - BasicConv2d(1536, 256, kernel_size=1, stride=1) - ) - - def forward(self, x): - x0 = self.branch0(x) - - x1_0 = self.branch1_0(x) - x1_1a = self.branch1_1a(x1_0) - x1_1b = self.branch1_1b(x1_0) - x1 = torch.cat((x1_1a, x1_1b), 1) - - x2_0 = self.branch2_0(x) - x2_1 = self.branch2_1(x2_0) - x2_2 = self.branch2_2(x2_1) - x2_3a = self.branch2_3a(x2_2) - x2_3b = self.branch2_3b(x2_2) - x2 = torch.cat((x2_3a, x2_3b), 1) - - x3 = self.branch3(x) - - out = torch.cat((x0, x1, x2, x3), 1) - return out - - -class InceptionV4(nn.Module): - """Inception-v4. - - Reference: - Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual - Connections on Learning. AAAI 2017. - - Public keys: - - ``inceptionv4``: InceptionV4. 
- """ - - def __init__(self, num_classes, loss, **kwargs): - super(InceptionV4, self).__init__() - self.loss = loss - - self.features = nn.Sequential( - BasicConv2d(3, 32, kernel_size=3, stride=2), - BasicConv2d(32, 32, kernel_size=3, stride=1), - BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), - Mixed_3a(), - Mixed_4a(), - Mixed_5a(), - Inception_A(), - Inception_A(), - Inception_A(), - Inception_A(), - Reduction_A(), # Mixed_6a - Inception_B(), - Inception_B(), - Inception_B(), - Inception_B(), - Inception_B(), - Inception_B(), - Inception_B(), - Reduction_B(), # Mixed_7a - Inception_C(), - Inception_C(), - Inception_C() - ) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.classifier = nn.Linear(1536, num_classes) - - def forward(self, x): - f = self.features(x) - v = self.global_avgpool(f) - v = v.view(v.size(0), -1) - - if not self.training: - return v - - y = self.classifier(v) - - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError('Unsupported loss: {}'.format(self.loss)) - - -def init_pretrained_weights(model, model_url): - """Initializes model with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. - """ - pretrain_dict = model_zoo.load_url(model_url) - model_dict = model.state_dict() - pretrain_dict = { - k: v - for k, v in pretrain_dict.items() - if k in model_dict and model_dict[k].size() == v.size() - } - model_dict.update(pretrain_dict) - model.load_state_dict(model_dict) - - -def inceptionv4(num_classes, loss='softmax', pretrained=True, **kwargs): - model = InceptionV4(num_classes, loss, **kwargs) - if pretrained: - model_url = pretrained_settings['inceptionv4']['imagenet']['url'] - init_pretrained_weights(model, model_url) - return model diff --git a/spaces/xiang-wuu/yolov5/utils/loggers/wandb/README.md b/spaces/xiang-wuu/yolov5/utils/loggers/wandb/README.md deleted file mode 100644 index d78324b4c8e9405f388091310227d51d1ead5712..0000000000000000000000000000000000000000 --- a/spaces/xiang-wuu/yolov5/utils/loggers/wandb/README.md +++ /dev/null @@ -1,162 +0,0 @@ -📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021. - -- [About Weights & Biases](#about-weights-&-biases) -- [First-Time Setup](#first-time-setup) -- [Viewing runs](#viewing-runs) -- [Disabling wandb](#disabling-wandb) -- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) -- [Reports: Share your work with the world!](#reports) - -## About Weights & Biases - -Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. - -Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. 
How W&B can help you optimize your machine learning workflows: - -- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time -- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically -- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization -- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators -- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently -- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models - -## First-Time Setup - -
            - Toggle Details -When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. - -W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: - -```shell -$ python train.py --project ... --name ... -``` - -YOLOv5 notebook example: Open In Colab Open In Kaggle -Screen Shot 2021-09-29 at 10 23 13 PM - -
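If it helps to see the moving parts, the snippet below is an illustrative sketch of how a project and run name map onto the `wandb` Python API. It is not YOLOv5's internal logger code; the project name, run name, and logged values are placeholders.

```python
# Illustrative sketch only -- YOLOv5 wires this up for you via --project / --name.
# Project name, run name, and logged values below are placeholders.
import wandb

run = wandb.init(project="YOLOv5", name="exp-baseline")   # analogous to --project / --name
run.config.update({"epochs": 300, "batch_size": 16})      # hyperparameters show up on the run page
for epoch in range(3):
    run.log({"epoch": epoch, "train/box_loss": 0.05 / (epoch + 1)})  # metrics stream to the dashboard
run.finish()
```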
            - -## Viewing Runs - -
            - Toggle Details -Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged: - - Training & Validation losses - - Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95 - - Learning Rate over time - - A bounding box debugging panel, showing the training progress over time - - GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage** - - System: Disk I/O, CPU utilization, RAM memory usage - - Your trained model as a W&B Artifact - - Environment: OS and Python types, Git repository and state, **training command** - -

            Weights & Biases dashboard

            -
            - -## Disabling wandb - -- training after running `wandb disabled` inside that directory creates no wandb run - ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) - -- To enable wandb again, run `wandb online` - ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) - -## Advanced Usage - -You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started. - -
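Under the hood, dataset and model versioning boils down to logging W&B Artifacts. The sketch below shows the general idea with the plain `wandb` API; the `log_dataset.py` helper used in the examples below does the equivalent for you, and the artifact name and directory path here are placeholders.

```python
# General idea only: version a dataset directory as a W&B Artifact.
# The artifact name and directory path are placeholders.
import wandb

run = wandb.init(project="YOLOv5", job_type="dataset-upload")
artifact = wandb.Artifact("coco128", type="dataset")
artifact.add_dir("../datasets/coco128")   # recursively adds every file in the directory
run.log_artifact(artifact)                # uploads and versions the artifact
run.finish()
```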
            -

            1: Train and Log Evaluation simultaneously

            - This is an extension of the previous section, but it will also start training after uploading the dataset, and it logs an evaluation Table. - The evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses references to the already uploaded datasets, - so no images will be uploaded from your system more than once. -
            - Usage - Code $ python train.py --upload_data val - -![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) - -
            - -

            2. Visualize and Version Datasets

            - Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from the dataset artifact. -
            - Usage - Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. - -![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) - -
            - -

            3: Train using dataset artifact

            - When you upload a dataset as described in the first section, you get a new config file with `_wandb` added to its name. This file contains the information that - can be used to train a model directly from the dataset artifact. This also logs the evaluation Table. -
            - Usage - Code $ python train.py --data {data}_wandb.yaml - -![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) - -
            - -

            4: Save model checkpoints as artifacts

            - To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval. - You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged. - -
            - Usage - Code $ python train.py --save_period 1 - -![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) - -
            - -
            - -

            5: Resume runs from checkpoint artifacts.

-Any run can be resumed using artifacts if the --resume argument starts with the wandb-artifact:// prefix followed by the run path, i.e., wandb-artifact://username/project/runid. This doesn't require the model checkpoint to be present on the local system. - -
            - Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
            - -

            6: Resume runs from dataset artifact & checkpoint artifacts.

            - Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device. - The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., either set --upload_dataset or - train from the _wandb.yaml file, and set --save_period - -
            - Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
            - - - -

            Reports

            -W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). - -Weights & Biases Reports - -## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - -## Status - -![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/spaces/xikacat/xikacatbing/Dockerfile b/spaces/xikacat/xikacatbing/Dockerfile deleted file mode 100644 index 3698c7cb7938e025afc53b18a571ae2961fbdffe..0000000000000000000000000000000000000000 --- a/spaces/xikacat/xikacatbing/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git - -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine - -# 设置工作目录 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . 
- -# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/xswu/HPSv2/src/__init__.py b/spaces/xswu/HPSv2/src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/xuwenhao83/simple_chatbot/app.py b/spaces/xuwenhao83/simple_chatbot/app.py deleted file mode 100644 index 57dd8c74bd7c1e5441f3893bb63fcb04dcef4407..0000000000000000000000000000000000000000 --- a/spaces/xuwenhao83/simple_chatbot/app.py +++ /dev/null @@ -1,58 +0,0 @@ -import openai -import os -import gradio as gr - -openai.api_key = os.environ.get("OPENAI_API_KEY") - -class Conversation: - def __init__(self, prompt, num_of_round): - self.prompt = prompt - self.num_of_round = num_of_round - self.messages = [] - self.messages.append({"role": "system", "content": self.prompt}) - - def ask(self, question): - try: - self.messages.append( {"role": "user", "content": question}) - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=self.messages, - temperature=0.5, - max_tokens=2048, - top_p=1, - ) - except Exception as e: - print(e) - return e - - message = response["choices"][0]["message"]["content"] - self.messages.append({"role": "assistant", "content": message}) - - if len(self.messages) > self.num_of_round*2 + 1: - del self.messages[1:3] - return message - - -prompt = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求: -1. 你的回答必须是中文 -2. 回答限制在100个字以内""" - -conv = Conversation(prompt, 5) - -def predict(input, history=[]): - history.append(input) - response = conv.ask(input) - history.append(response) - responses = [(u,b) for u,b in zip(history[::2], history[1::2])] - return responses, history - -with gr.Blocks(css="#chatbot{height:350px} .overflow-y-auto{height:500px}") as demo: - chatbot = gr.Chatbot(elem_id="chatbot") - state = gr.State([]) - - with gr.Row(): - txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False) - - txt.submit(predict, [txt, state], [chatbot, state]) - -demo.launch() \ No newline at end of file diff --git a/spaces/xxccc/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/xxccc/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/xxccc/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == 
circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. - */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = 
el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 
0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + 
circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/xxccc/gpt-academic/docs/README.md.Italian.md b/spaces/xxccc/gpt-academic/docs/README.md.Italian.md deleted file mode 100644 index 04c5ed6bec16d90d6ce53852b4902e8b7d8c55a8..0000000000000000000000000000000000000000 --- a/spaces/xxccc/gpt-academic/docs/README.md.Italian.md +++ /dev/null @@ -1,310 +0,0 @@ -> **Nota** -> -> Durante l'installazione delle dipendenze, selezionare rigorosamente le **versioni specificate** nel file requirements.txt. -> -> ` pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/` - -# GPT Ottimizzazione Accademica (GPT Academic) - -**Se ti piace questo progetto, ti preghiamo di dargli una stella. Se hai sviluppato scorciatoie accademiche o plugin funzionali più utili, non esitare ad aprire una issue o pull request. Abbiamo anche una README in [Inglese|](docs/README_EN.md)[Giapponese|](docs/README_JP.md)[Coreano|](https://github.com/mldljyh/ko_gpt_academic)[Russo|](docs/README_RS.md)[Francese](docs/README_FR.md) tradotta da questo stesso progetto. -Per tradurre questo progetto in qualsiasi lingua con GPT, leggere e eseguire [`multi_language.py`](multi_language.py) (sperimentale). - -> **Nota** -> -> 1. Si prega di notare che solo i plugin (pulsanti) contrassegnati in **rosso** supportano la lettura di file, alcuni plugin sono posizionati nel **menu a discesa** nella zona dei plugin. Accettiamo e gestiamo PR per qualsiasi nuovo plugin con **massima priorità**! -> -> 2. Le funzionalità di ogni file di questo progetto sono descritte dettagliatamente nella propria analisi di autotraduzione [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Con l'iterazione delle versioni, è possibile fare clic sui plugin funzionali correlati in qualsiasi momento per richiamare GPT e generare nuovamente il rapporto di analisi automatica del progetto. Le domande frequenti sono riassunte nella [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Metodo di installazione] (#installazione). -> -> 3. Questo progetto è compatibile e incoraggia l'utilizzo di grandi modelli di linguaggio di produzione nazionale come chatglm, RWKV, Pangu ecc. Supporta la coesistenza di più api-key e può essere compilato nel file di configurazione come `API_KEY="openai-key1,openai-key2,api2d-key3"`. Per sostituire temporaneamente `API_KEY`, inserire `API_KEY` temporaneo nell'area di input e premere Invio per renderlo effettivo. - -
            Funzione | Descrizione ---- | --- -Correzione immediata | Supporta correzione immediata e ricerca degli errori di grammatica del documento con un solo clic -Traduzione cinese-inglese immediata | Traduzione cinese-inglese immediata con un solo clic -Spiegazione del codice immediata | Visualizzazione del codice, spiegazione del codice, generazione del codice, annotazione del codice con un solo clic -[Scorciatoie personalizzate](https://www.bilibili.com/video/BV14s4y1E7jN) | Supporta scorciatoie personalizzate -Design modularizzato | Supporta potenti [plugin di funzioni](https://github.com/binary-husky/chatgpt_academic/tree/master/crazy_functions) personalizzati, i plugin supportano l'[aggiornamento in tempo reale](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[Auto-profiling del programma](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin di funzioni] [Comprensione immediata](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) del codice sorgente di questo progetto -[Analisi del programma](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin di funzioni] Un clic può analizzare l'albero di altri progetti Python/C/C++/Java/Lua/... -Lettura del documento, [traduzione](https://www.bilibili.com/video/BV1KT411x7Wn) del documento | [Plugin di funzioni] La lettura immediata dell'intero documento latex/pdf di un documento e la generazione di un riassunto -Traduzione completa di un documento Latex, [correzione immediata](https://www.bilibili.com/video/BV1FT411H7c5/) | [Plugin di funzioni] Una traduzione o correzione immediata di un documento Latex -Generazione di annotazioni in batch | [Plugin di funzioni] Generazione automatica delle annotazioni di funzione con un solo clic -[Traduzione cinese-inglese di Markdown](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin di funzioni] Hai letto il [README](https://github.com/binary-husky/chatgpt_academic/blob/master/docs/README_EN.md) delle cinque lingue sopra? 
-Generazione di report di analisi di chat | [Plugin di funzioni] Generazione automatica di un rapporto di sintesi dopo l'esecuzione -[Funzione di traduzione di tutto il documento PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin di funzioni] Estrarre il titolo e il sommario dell'articolo PDF + tradurre l'intero testo (multithreading) -[Assistente di Arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugin di funzioni] Inserire l'URL dell'articolo di Arxiv e tradurre il sommario con un clic + scaricare il PDF -[Assistente integrato di Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Plugin di funzioni] Con qualsiasi URL di pagina di ricerca di Google Scholar, lascia che GPT ti aiuti a scrivere il tuo [relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/) -Aggregazione delle informazioni su Internet + GPT | [Plugin di funzioni] Fai in modo che GPT rilevi le informazioni su Internet prima di rispondere alle domande, senza mai diventare obsolete -Visualizzazione di formule/img/tabelle | È possibile visualizzare un'equazione in forma [tex e render](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) contemporaneamente, supporta equazioni e evidenziazione del codice -Supporto per plugin di funzioni multithreading | Supporto per chiamata multithreaded di chatgpt, elaborazione con un clic di grandi quantità di testo o di un programma -Avvia il tema di gradio [scuro](https://github.com/binary-husky/chatgpt_academic/issues/173) | Aggiungere ```/?__theme=dark``` dopo l'URL del browser per passare a un tema scuro -Supporto per maggiori modelli LLM, supporto API2D | Sentirsi serviti simultaneamente da GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS) deve essere una grande sensazione, giusto? -Ulteriori modelli LLM supportat,i supporto per l'implementazione di Huggingface | Aggiunta di un'interfaccia Newbing (Nuovo Bing), introdotta la compatibilità con Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) e [PanGu-α](https://openi.org.cn/pangu/) -Ulteriori dimostrazioni di nuove funzionalità (generazione di immagini, ecc.)... | Vedere la fine di questo documento... - -- Nuova interfaccia (modificare l'opzione LAYOUT in `config.py` per passare dal layout a sinistra e a destra al layout superiore e inferiore) -
            - -
            Sei un traduttore professionista di paper accademici. - -- Tutti i pulsanti vengono generati dinamicamente leggendo il file functional.py, e aggiungerci nuove funzionalità è facile, liberando la clipboard. -
            - -
            - -- Revisione/Correzione -
            - -
            - -- Se l'output contiene una formula, viene visualizzata sia come testo che come formula renderizzata, per facilitare la copia e la visualizzazione. -
            - -
            - -- Non hai tempo di leggere il codice del progetto? Passa direttamente a chatgpt e chiedi informazioni. -
            - -
            - -- Chiamata mista di vari modelli di lingua di grandi dimensioni (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
            - -
            - ---- -# Installazione -## Installazione - Metodo 1: Esecuzione diretta (Windows, Linux o MacOS) - -1. Scarica il progetto -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -2. Configura API_KEY - -In `config.py`, configura la tua API KEY e altre impostazioni, [configs for special network environments](https://github.com/binary-husky/gpt_academic/issues/1). - -(N.B. Quando il programma viene eseguito, verifica prima se esiste un file di configurazione privato chiamato `config_private.py` e sovrascrive le stesse configurazioni in `config.py`. Pertanto, se capisci come funziona la nostra logica di lettura della configurazione, ti consigliamo vivamente di creare un nuovo file di configurazione chiamato `config_private.py` accanto a `config.py`, e spostare (copiare) le configurazioni di `config.py` in `config_private.py`. 'config_private.py' non è sotto la gestione di git e può proteggere ulteriormente le tue informazioni personali. NB Il progetto supporta anche la configurazione della maggior parte delle opzioni tramite "variabili d'ambiente". La sintassi della variabile d'ambiente è descritta nel file `docker-compose`. Priorità di lettura: "variabili d'ambiente" > "config_private.py" > "config.py") - - -3. Installa le dipendenze -```sh -# (Scelta I: se sei familiare con python) (python 3.9 o superiore, più nuovo è meglio), N.B.: utilizza il repository ufficiale pip o l'aliyun pip repository, metodo temporaneo per cambiare il repository: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Scelta II: se non conosci Python) utilizza anaconda, il processo è simile (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # crea l'ambiente anaconda -conda activate gptac_venv # attiva l'ambiente anaconda -python -m pip install -r requirements.txt # questo passaggio funziona allo stesso modo dell'installazione con pip -``` - -
            Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, fare clic qui per espandere -

            - -【Passaggio facoltativo】 Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, è necessario installare ulteriori dipendenze (prerequisiti: conoscenza di Python, esperienza con Pytorch e computer sufficientemente potente): -```sh -# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt - -# 【Passaggio facoltativo II】 Supporto a MOSS di Fudan -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto - -# 【Passaggio facoltativo III】 Assicurati che il file di configurazione config.py includa tutti i modelli desiderati, al momento tutti i modelli supportati sono i seguenti (i modelli della serie jittorllms attualmente supportano solo la soluzione docker): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

            -
            - - - -4. Esegui -```sh -python main.py -```5. Plugin di test delle funzioni -``` -- Funzione plugin di test (richiede una risposta gpt su cosa è successo oggi in passato), puoi utilizzare questa funzione come template per implementare funzionalità più complesse - Clicca su "[Demo del plugin di funzione] Oggi nella storia" -``` - -## Installazione - Metodo 2: Utilizzo di Docker - -1. Solo ChatGPT (consigliato per la maggior parte delle persone) - -``` sh -git clone https://github.com/binary-husky/chatgpt_academic.git # scarica il progetto -cd chatgpt_academic # entra nel percorso -nano config.py # con un qualsiasi editor di testo, modifica config.py configurando "Proxy", "API_KEY" e "WEB_PORT" (ad esempio 50923) -docker build -t gpt-academic . # installa - -#(ultimo passaggio - selezione 1) In un ambiente Linux, utilizzare '--net=host' è più conveniente e veloce -docker run --rm -it --net=host gpt-academic -#(ultimo passaggio - selezione 2) In un ambiente MacOS/Windows, l'opzione -p può essere utilizzata per esporre la porta del contenitore (ad es. 50923) alla porta della macchina -docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic -``` - -2. ChatGPT + ChatGLM + MOSS (richiede familiarità con Docker) - -``` sh -# Modifica docker-compose.yml, elimina i piani 1 e 3, mantieni il piano 2. Modifica la configurazione del piano 2 in docker-compose.yml, si prega di fare riferimento alle relative annotazioni -docker-compose up -``` - -3. ChatGPT + LLAMA + Pangu + RWKV (richiede familiarità con Docker) - -``` sh -# Modifica docker-compose.yml, elimina i piani 1 e 2, mantieni il piano 3. Modifica la configurazione del piano 3 in docker-compose.yml, si prega di fare riferimento alle relative annotazioni -docker-compose up -``` - - -## Installazione - Metodo 3: Altre modalità di distribuzione - -1. Come utilizzare un URL di reindirizzamento / AzureAPI Cloud Microsoft -Configura API_URL_REDIRECT seguendo le istruzioni nel file `config.py`. - -2. Distribuzione su un server cloud remoto (richiede conoscenze ed esperienza di server cloud) -Si prega di visitare [wiki di distribuzione-1] (https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -3. Utilizzo di WSL2 (Windows Subsystem for Linux) -Si prega di visitare [wiki di distribuzione-2] (https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - -4. Come far funzionare ChatGPT all'interno di un sottodominio (ad es. `http://localhost/subpath`) -Si prega di visitare [Istruzioni per l'esecuzione con FastAPI] (docs/WithFastapi.md) - -5. Utilizzo di docker-compose per l'esecuzione -Si prega di leggere il file docker-compose.yml e seguire le istruzioni fornite. - ---- -# Uso avanzato -## Personalizzazione dei pulsanti / Plugin di funzione personalizzati - -1. Personalizzazione dei pulsanti (scorciatoie accademiche) -Apri `core_functional.py` con qualsiasi editor di testo e aggiungi la voce seguente, quindi riavvia il programma (se il pulsante è già stato aggiunto con successo e visibile, il prefisso e il suffisso supportano la modifica in tempo reale, senza bisogno di riavviare il programma). - -ad esempio -``` -"超级英译中": { - # Prefisso, verrà aggiunto prima del tuo input. Ad esempio, descrivi la tua richiesta, come tradurre, spiegare il codice, correggere errori, ecc. 
-    "Prefix": "Please translate this text into Chinese, and then explain every technical term in the text with a markdown table:\n\n",
-
-    # Suffix, added after your input. Together with the prefix you can, for example, wrap your input in quotation marks.
-    "Suffix": "",
-},
-```
-
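Conceptually, a custom button does nothing more than wrap whatever is in the input box between its `Prefix` and `Suffix` before the text is sent to the model. A minimal sketch of that idea (the helper below is hypothetical and not the project's actual dispatch code):

```python
# Hypothetical helper showing how a core_functional.py entry is applied to the input-box text.
entry = {
    "Prefix": ("Please translate this text into Chinese, and then explain every "
               "technical term in the text with a markdown table:\n\n"),
    "Suffix": "",
}

def build_prompt(user_input: str, button_entry: dict) -> str:
    """Concatenate prefix + raw input + suffix into the final prompt."""
    return button_entry["Prefix"] + user_input + button_entry["Suffix"]

print(build_prompt("Transformers rely on self-attention.", entry))
```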
            - -
-
-2. Custom function plugins
-
-Write your own function plugins to run any task you want, including ones you never thought of.
-Writing and debugging plugins in this project is easy: with some basic Python knowledge you can implement your own plugin by following the template we provide. For details, see the [function plugin guide](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-
----
-# Latest update
-## New dynamic features
-
-1. Conversation saving. In the function-plugin area, click "Save the current conversation" to save the current conversation as a readable and restorable html file; in addition, in the function-plugin area (drop-down menu), click "Load archived conversation history" to restore a previous conversation. Tip: clicking "Load archived conversation history" without specifying a file lets you browse the cache of archived html history, and clicking "Delete all local conversation history records" deletes all cached html archives (a simplified sketch of this archiving idea appears right after this feature list).
-
            - -
-
-2. Report generation. Most plugins produce a work report after they finish running.
-
            - - - -
-
-3. Modular function design: simple interfaces that can still support powerful features.
-
            - - -
-
-4. This is an open-source project that can "translate itself".
-
            - -
-
-5. Translating other open-source projects is easy.
-
            - -
            - -
            - -
-
-6. A small decorative [live2d](https://github.com/fghrsh/live2d_demo) feature (disabled by default; enabling it requires editing `config.py`).
-
            - -
-
-7. Support for the MOSS large language model
-
            - -
-
-8. OpenAI image generation
-
            - -
-
-9. OpenAI audio parsing and synthesis
-
            - -
-
-10. Full-text LaTeX proofreading
-
            - -
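Returning to feature 1 (conversation saving): the sketch below illustrates the idea of dumping the (question, answer) history to a self-contained html file. It is an illustration only, not the project's actual implementation (which also makes the archive restorable from the UI).

```python
# Simplified illustration of "save the current conversation" as an html archive.
import html
import time
from pathlib import Path

def save_chat_as_html(history, out_dir="chat_archive"):
    """Write (question, answer) pairs to a readable, standalone html file and return its path."""
    Path(out_dir).mkdir(exist_ok=True)
    rows = "\n".join(
        f"<div><b>User:</b> {html.escape(q)}</div>\n<div><b>Assistant:</b> {html.escape(a)}</div>"
        for q, a in history
    )
    page = f"<html><head><meta charset='utf-8'></head><body>\n{rows}\n</body></html>"
    out_path = Path(out_dir) / f"conversation_{int(time.time())}.html"
    out_path.write_text(page, encoding="utf-8")
    return out_path

print(save_chat_as_html([("What happened in history today?", "On this day ...")]))
```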
-
-
-## Version:
-- version 3.5 (Todo): use natural language to call every function plugin of this project (high priority)
-- version 3.4 (Todo): multi-threading support for the locally deployed ChatGLM large language model
-- version 3.3: + Internet information aggregation feature
-- version 3.2: function plugins accept more parameter interfaces (conversation saving, reading code in any language + asking any combination of LLMs at the same time)
-- version 3.1: support for querying several gpt models at once! api2d support, load balancing across multiple api keys
-- version 3.0: support for chatglm and other small LLMs
-- version 2.6: plugin architecture reworked, better interactivity, more plugins added
-- version 2.5: self-updating; fixed the over-long-text / token-overflow problem when summarizing large engineering projects
-- version 2.4: (1) added whole-document PDF translation; (2) added swapping of the input area; (3) added a vertical layout option; (4) optimized the multi-threaded function plugins.
-- version 2.3: improved multi-threaded interactivity
-- version 2.2: function plugins support hot reloading
-- version 2.1: collapsible layout
-- version 2.0: introduction of modular function plugins
-- version 1.0: basic functions
-
-gpt_academic developers QQ group-2: 610599535
-
-- Known issues
-    - Some browser translation plugins interfere with the frontend of this software
-    - A gradio version that is either too new or too old can cause various malfunctions
-
-## References and learning
-
-```
-The code draws on the designs of many other excellent projects, mainly:
-
-# Project 1: Tsinghua's ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua's JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Others:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/stores/reactions.ts b/spaces/yderre-aubay/midi-player-demo/src/main/stores/reactions.ts deleted file mode 100644 index 761697765af2e19b24372db5dbe21070c288d9ad..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/stores/reactions.ts +++ /dev/null @@ -1,74 +0,0 @@
-import { autorun, observe } from "mobx"
-import { resetSelection } from "../actions"
-import MIDIOutput from "../services/MIDIOutput"
-import RootStore from "./RootStore"
-
-export const registerReactions = (rootStore: RootStore) => {
-  // reset selection when tool changed
-  observe(rootStore.pianoRollStore, "mouseMode", resetSelection(rootStore))
-
-  observe(
-    rootStore.midiDeviceStore,
-    "enabledOutputs",
-    updateOutputDevices(rootStore),
-  )
-
-  autorun(updateInputDevices(rootStore))
-  autorun(updateOutputDevices(rootStore))
-
-  observe(
-    rootStore.midiRecorder,
-    "isRecording",
-    disableSeekWhileRecording(rootStore),
-  )
-
-  observe(rootStore.player, "isPlaying", stopRecordingWhenStopPlayer(rootStore))
-}
-
-type Reaction = (rootStore: RootStore) 
=> () => void - -// sync synthGroup.output to enabledOutputIds/isFactorySoundEnabled -const updateOutputDevices: Reaction = - ({ midiDeviceStore, player, synth, synthGroup }) => - () => { - const { outputs, enabledOutputs, isFactorySoundEnabled } = midiDeviceStore - - player.allSoundsOff() - - const midiDeviceEntries = outputs.map((device) => ({ - synth: new MIDIOutput(device), - isEnabled: enabledOutputs[device.id], - })) - - synthGroup.outputs = [ - { - synth: synth, - isEnabled: isFactorySoundEnabled, - }, - ...midiDeviceEntries, - ] - } - -const updateInputDevices: Reaction = - ({ midiDeviceStore, midiInput }) => - () => { - const { inputs, enabledInputs } = midiDeviceStore - - const devices = inputs.filter((d) => enabledInputs[d.id]) - - midiInput.removeAllDevices() - devices.forEach(midiInput.addDevice) - } - -const disableSeekWhileRecording: Reaction = - ({ player, midiRecorder }) => - () => - (player.disableSeek = midiRecorder.isRecording) - -const stopRecordingWhenStopPlayer: Reaction = - ({ player, midiRecorder }) => - () => { - if (!player.isPlaying) { - midiRecorder.isRecording = false - } - } diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/big_bird/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/big_bird/__init__.py deleted file mode 100644 index ef8ad80aa6b5e6c516aaae9a3d229ce1f4a3e261..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/big_bird/__init__.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import TYPE_CHECKING - -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_big_bird": ["BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdConfig", "BigBirdOnnxConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_big_bird"] = ["BigBirdTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_big_bird_fast"] = ["BigBirdTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_big_bird"] = [ - "BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST", - "BigBirdForCausalLM", - "BigBirdForMaskedLM", - "BigBirdForMultipleChoice", - "BigBirdForPreTraining", - "BigBirdForQuestionAnswering", - "BigBirdForSequenceClassification", - "BigBirdForTokenClassification", - "BigBirdLayer", - "BigBirdModel", - "BigBirdPreTrainedModel", - "load_tf_weights_in_big_bird", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_big_bird"] = [ - "FlaxBigBirdForCausalLM", - "FlaxBigBirdForMaskedLM", - "FlaxBigBirdForMultipleChoice", - "FlaxBigBirdForPreTraining", - "FlaxBigBirdForQuestionAnswering", - "FlaxBigBirdForSequenceClassification", - "FlaxBigBirdForTokenClassification", - "FlaxBigBirdModel", - "FlaxBigBirdPreTrainedModel", - ] - -if TYPE_CHECKING: - from .configuration_big_bird import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdConfig, BigBirdOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_big_bird import BigBirdTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_big_bird_fast import BigBirdTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_big_bird import ( - BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST, - BigBirdForCausalLM, - BigBirdForMaskedLM, - BigBirdForMultipleChoice, - BigBirdForPreTraining, - BigBirdForQuestionAnswering, - BigBirdForSequenceClassification, - BigBirdForTokenClassification, - BigBirdLayer, - BigBirdModel, - BigBirdPreTrainedModel, - load_tf_weights_in_big_bird, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_big_bird import ( - FlaxBigBirdForCausalLM, - FlaxBigBirdForMaskedLM, - FlaxBigBirdForMultipleChoice, - FlaxBigBirdForPreTraining, - FlaxBigBirdForQuestionAnswering, - FlaxBigBirdForSequenceClassification, - FlaxBigBirdForTokenClassification, - FlaxBigBirdModel, - FlaxBigBirdPreTrainedModel, - ) - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/webUI.py 
b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/webUI.py deleted file mode 100644 index 17e39b21fa24d7ec9867b693723b7b087840a9b4..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/webUI.py +++ /dev/null @@ -1,379 +0,0 @@ -import io -import os - -# os.system("wget -P cvec/ https://huggingface.co/spaces/innnky/nanami/resolve/main/checkpoint_best_legacy_500.pt") -import gradio as gr -import gradio.processing_utils as gr_pu -import librosa -import numpy as np -import soundfile -from inference.infer_tool import Svc -import logging -import re -import json - -import subprocess -import edge_tts -import asyncio -from scipy.io import wavfile -import librosa -import torch -import time -import traceback -from itertools import chain -from utils import mix_model -from compress_model import removeOptimizer - -logging.getLogger('numba').setLevel(logging.WARNING) -logging.getLogger('markdown_it').setLevel(logging.WARNING) -logging.getLogger('urllib3').setLevel(logging.WARNING) -logging.getLogger('matplotlib').setLevel(logging.WARNING) -logging.getLogger('multipart').setLevel(logging.WARNING) - -model = None -spk = None -debug = False - -cuda = {} -if torch.cuda.is_available(): - for i in range(torch.cuda.device_count()): - device_name = torch.cuda.get_device_properties(i).name - cuda[f"CUDA:{i} {device_name}"] = f"cuda:{i}" - -def upload_mix_append_file(files,sfiles): - try: - if(sfiles == None): - file_paths = [file.name for file in files] - else: - file_paths = [file.name for file in chain(files,sfiles)] - p = {file:100 for file in file_paths} - return file_paths,mix_model_output1.update(value=json.dumps(p,indent=2)) - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def mix_submit_click(js,mode): - try: - assert js.lstrip()!="" - modes = {"凸组合":0, "线性组合":1} - mode = modes[mode] - data = json.loads(js) - data = list(data.items()) - model_path,mix_rate = zip(*data) - path = mix_model(model_path,mix_rate,mode) - return f"成功,文件被保存在了{path}" - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def updata_mix_info(files): - try: - if files == None : return mix_model_output1.update(value="") - p = {file.name:100 for file in files} - return mix_model_output1.update(value=json.dumps(p,indent=2)) - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def modelAnalysis(model_path,config_path,cluster_model_path,device,enhance,diff_model_path,diff_config_path,only_diffusion,use_spk_mix): - global model - try: - device = cuda[device] if "CUDA" in device else device - cluster_filepath = os.path.split(cluster_model_path.name) if cluster_model_path is not None else "no_cluster" - fr = ".pkl" in cluster_filepath[1] - #model = Svc(model_path.name, config_path.name, device=device if device!="Auto" else None, cluster_model_path = cluster_model_path.name if cluster_model_path != None else "",nsf_hifigan_enhance=enhance) - model = Svc(model_path.name, - config_path.name, - device=device if device != "Auto" else None, - cluster_model_path = cluster_model_path.name if cluster_model_path is not None else "", - nsf_hifigan_enhance=enhance, - diffusion_model_path = diff_model_path.name if diff_model_path is not None else "", - diffusion_config_path = diff_config_path.name if diff_config_path is not None else "", - shallow_diffusion = True if diff_model_path is not None else False, - only_diffusion = only_diffusion, - spk_mix_enable = use_spk_mix, - feature_retrieval = fr - ) - spks = 
list(model.spk2id.keys()) - device_name = torch.cuda.get_device_properties(model.dev).name if "cuda" in str(model.dev) else str(model.dev) - msg = f"成功加载模型到设备{device_name}上\n" - if cluster_model_path is None: - msg += "未加载聚类模型或特征检索模型\n" - elif fr: - msg += f"特征检索模型{cluster_filepath[1]}加载成功\n" - else: - msg += f"聚类模型{cluster_filepath[1]}加载成功\n" - if diff_model_path is None: - msg += "未加载扩散模型\n" - else: - msg += f"扩散模型{diff_model_path.name}加载成功\n" - msg += "当前模型的可用音色:\n" - for i in spks: - msg += i + " " - return sid.update(choices = spks,value=spks[0]), msg - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - - -def modelUnload(): - global model - if model is None: - return sid.update(choices = [],value=""),"没有模型需要卸载!" - else: - model.unload_model() - model = None - torch.cuda.empty_cache() - return sid.update(choices = [],value=""),"模型卸载完毕!" - -def vc_fn(sid, input_audio, vc_transform, auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,f0_predictor,enhancer_adaptive_key,cr_threshold,k_step,use_spk_mix,second_encoding,loudness_envelope_adjustment): - global model - try: - if input_audio is None: - return "You need to upload an audio", None - if model is None: - return "You need to upload an model", None - print(input_audio) - sampling_rate, audio = input_audio - print(audio.shape,sampling_rate) - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - print(audio.dtype) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - temp_path = "temp.wav" - soundfile.write(temp_path, audio, sampling_rate, format="wav") - _audio = model.slice_inference( - temp_path, - sid, - vc_transform, - slice_db, - cluster_ratio, - auto_f0, - noise_scale, - pad_seconds, - cl_num, - lg_num, - lgr_num, - f0_predictor, - enhancer_adaptive_key, - cr_threshold, - k_step, - use_spk_mix, - second_encoding, - loudness_envelope_adjustment - ) - model.clear_empty() - os.remove(temp_path) - #构建保存文件的路径,并保存到results文件夹内 - timestamp = str(int(time.time())) - if not os.path.exists("results"): - os.makedirs("results") - output_file = os.path.join("results", sid + "_" + timestamp + ".wav") - soundfile.write(output_file, _audio, model.target_sample, format="wav") - return "Success", output_file - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def tts_func(_text,_rate,_voice): - #使用edge-tts把文字转成音频 - # voice = "zh-CN-XiaoyiNeural"#女性,较高音 - # voice = "zh-CN-YunxiNeural"#男性 - voice = "zh-CN-YunxiNeural"#男性 - if ( _voice == "女" ) : voice = "zh-CN-XiaoyiNeural" - output_file = _text[0:10]+".wav" - # communicate = edge_tts.Communicate(_text, voice) - # await communicate.save(output_file) - if _rate>=0: - ratestr="+{:.0%}".format(_rate) - elif _rate<0: - ratestr="{:.0%}".format(_rate)#减号自带 - - p=subprocess.Popen("edge-tts "+ - " --text "+_text+ - " --write-media "+output_file+ - " --voice "+voice+ - " --rate="+ratestr - ,shell=True, - stdout=subprocess.PIPE, - stdin=subprocess.PIPE) - p.wait() - return output_file - -def text_clear(text): - return re.sub(r"[\n\,\(\) ]", "", text) - -def vc_fn2(sid, input_audio, vc_transform, auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,text2tts,tts_rate,tts_voice,f0_predictor,enhancer_adaptive_key,cr_threshold): - #使用edge-tts把文字转成音频 - text2tts=text_clear(text2tts) - output_file=tts_func(text2tts,tts_rate,tts_voice) - - #调整采样率 - sr2=44100 - wav, sr = librosa.load(output_file) - wav2 = librosa.resample(wav, orig_sr=sr, target_sr=sr2) - save_path2= 
text2tts[0:10]+"_44k"+".wav" - wavfile.write(save_path2,sr2, - (wav2 * np.iinfo(np.int16).max).astype(np.int16) - ) - - #读取音频 - sample_rate, data=gr_pu.audio_from_file(save_path2) - vc_input=(sample_rate, data) - - a,b=vc_fn(sid, vc_input, vc_transform,auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,f0_predictor,enhancer_adaptive_key,cr_threshold) - os.remove(output_file) - os.remove(save_path2) - return a,b - -def model_compression(_model): - if _model == "": - return "请先选择要压缩的模型" - else: - model_path = os.path.split(_model.name) - filename, extension = os.path.splitext(model_path[1]) - output_model_name = f"{filename}_compressed{extension}" - output_path = os.path.join(os.getcwd(), output_model_name) - removeOptimizer(_model.name, output_path) - return f"模型已成功被保存在了{output_path}" - -def debug_change(): - global debug - debug = debug_button.value - -with gr.Blocks( - theme=gr.themes.Base( - primary_hue = gr.themes.colors.green, - font=["Source Sans Pro", "Arial", "sans-serif"], - font_mono=['JetBrains mono', "Consolas", 'Courier New'] - ), -) as app: - with gr.Tabs(): - with gr.TabItem("推理"): - gr.Markdown(value=""" - So-vits-svc 4.0 推理 webui - """) - with gr.Row(variant="panel"): - with gr.Column(): - gr.Markdown(value=""" - 模型设置 - """) - with gr.Row(): - model_path = gr.File(label="选择模型文件") - config_path = gr.File(label="选择配置文件") - with gr.Row(): - diff_model_path = gr.File(label="选择扩散模型文件") - diff_config_path = gr.File(label="选择扩散模型配置文件") - cluster_model_path = gr.File(label="选择聚类模型或特征检索文件(没有可以不选)") - device = gr.Dropdown(label="推理设备,默认为自动选择CPU和GPU", choices=["Auto",*cuda.keys(),"cpu"], value="Auto") - enhance = gr.Checkbox(label="是否使用NSF_HIFIGAN增强,该选项对部分训练集少的模型有一定的音质增强效果,但是对训练好的模型有反面效果,默认关闭", value=False) - only_diffusion = gr.Checkbox(label="是否使用全扩散推理,开启后将不使用So-VITS模型,仅使用扩散模型进行完整扩散推理,默认关闭", value=False) - with gr.Column(): - gr.Markdown(value=""" - 左侧文件全部选择完毕后(全部文件模块显示download),点击“加载模型”进行解析: - """) - model_load_button = gr.Button(value="加载模型", variant="primary") - model_unload_button = gr.Button(value="卸载模型", variant="primary") - sid = gr.Dropdown(label="音色(说话人)") - sid_output = gr.Textbox(label="Output Message") - - - with gr.Row(variant="panel"): - with gr.Column(): - gr.Markdown(value=""" - 推理设置 - """) - auto_f0 = gr.Checkbox(label="自动f0预测,配合聚类模型f0预测效果更好,会导致变调功能失效(仅限转换语音,歌声勾选此项会究极跑调)", value=False) - f0_predictor = gr.Dropdown(label="选择F0预测器,可选择crepe,pm,dio,harvest,默认为pm(注意:crepe为原F0使用均值滤波器)", choices=["pm","dio","harvest","crepe"], value="pm") - vc_transform = gr.Number(label="变调(整数,可以正负,半音数量,升高八度就是12)", value=0) - cluster_ratio = gr.Number(label="聚类模型/特征检索混合比例,0-1之间,0即不启用聚类/特征检索。使用聚类/特征检索能提升音色相似度,但会导致咬字下降(如果使用建议0.5左右)", value=0) - slice_db = gr.Number(label="切片阈值", value=-40) - noise_scale = gr.Number(label="noise_scale 建议不要动,会影响音质,玄学参数", value=0.4) - k_step = gr.Slider(label="浅扩散步数,只有使用了扩散模型才有效,步数越大越接近扩散模型的结果", value=100, minimum = 1, maximum = 1000) - with gr.Column(): - pad_seconds = gr.Number(label="推理音频pad秒数,由于未知原因开头结尾会有异响,pad一小段静音段后就不会出现", value=0.5) - cl_num = gr.Number(label="音频自动切片,0为不切片,单位为秒(s)", value=0) - lg_num = gr.Number(label="两端音频切片的交叉淡入长度,如果自动切片后出现人声不连贯可调整该数值,如果连贯建议采用默认值0,注意,该设置会影响推理速度,单位为秒/s", value=0) - lgr_num = gr.Number(label="自动音频切片后,需要舍弃每段切片的头尾。该参数设置交叉长度保留的比例,范围0-1,左开右闭", value=0.75) - enhancer_adaptive_key = gr.Number(label="使增强器适应更高的音域(单位为半音数)|默认为0", value=0) - cr_threshold = gr.Number(label="F0过滤阈值,只有启动crepe时有效. 数值范围从0-1. 
降低该值可减少跑调概率,但会增加哑音", value=0.05) - loudness_envelope_adjustment = gr.Number(label="输入源响度包络替换输出响度包络融合比例,越靠近1越使用输出响度包络", value = 0) - second_encoding = gr.Checkbox(label = "二次编码,浅扩散前会对原始音频进行二次编码,玄学选项,效果时好时差,默认关闭", value=False) - use_spk_mix = gr.Checkbox(label = "动态声线融合", value = False, interactive = False) - with gr.Tabs(): - with gr.TabItem("音频转音频"): - vc_input3 = gr.Audio(label="选择音频") - vc_submit = gr.Button("音频转换", variant="primary") - with gr.TabItem("文字转音频"): - text2tts=gr.Textbox(label="在此输入要转译的文字。注意,使用该功能建议打开F0预测,不然会很怪") - tts_rate = gr.Number(label="tts语速", value=0) - tts_voice = gr.Radio(label="性别",choices=["男","女"], value="男") - vc_submit2 = gr.Button("文字转换", variant="primary") - with gr.Row(): - with gr.Column(): - vc_output1 = gr.Textbox(label="Output Message") - with gr.Column(): - vc_output2 = gr.Audio(label="Output Audio", interactive=False) - - with gr.TabItem("小工具/实验室特性"): - gr.Markdown(value=""" - So-vits-svc 4.0 小工具/实验室特性 - """) - with gr.Tabs(): - with gr.TabItem("静态声线融合"): - gr.Markdown(value=""" - 介绍:该功能可以将多个声音模型合成为一个声音模型(多个模型参数的凸组合或线性组合),从而制造出现实中不存在的声线 - 注意: - 1.该功能仅支持单说话人的模型 - 2.如果强行使用多说话人模型,需要保证多个模型的说话人数量相同,这样可以混合同一个SpaekerID下的声音 - 3.保证所有待混合模型的config.json中的model字段是相同的 - 4.输出的混合模型可以使用待合成模型的任意一个config.json,但聚类模型将不能使用 - 5.批量上传模型的时候最好把模型放到一个文件夹选中后一起上传 - 6.混合比例调整建议大小在0-100之间,也可以调为其他数字,但在线性组合模式下会出现未知的效果 - 7.混合完毕后,文件将会保存在项目根目录中,文件名为output.pth - 8.凸组合模式会将混合比例执行Softmax使混合比例相加为1,而线性组合模式不会 - - """) - mix_model_path = gr.Files(label="选择需要混合模型文件") - mix_model_upload_button = gr.UploadButton("选择/追加需要混合模型文件", file_count="multiple") - mix_model_output1 = gr.Textbox( - label="混合比例调整,单位/%", - interactive = True - ) - mix_mode = gr.Radio(choices=["凸组合", "线性组合"], label="融合模式",value="凸组合",interactive = True) - mix_submit = gr.Button("声线融合启动", variant="primary") - mix_model_output2 = gr.Textbox( - label="Output Message" - ) - mix_model_path.change(updata_mix_info,[mix_model_path],[mix_model_output1]) - mix_model_upload_button.upload(upload_mix_append_file, [mix_model_upload_button,mix_model_path], [mix_model_path,mix_model_output1]) - mix_submit.click(mix_submit_click, [mix_model_output1,mix_mode], [mix_model_output2]) - - with gr.TabItem("模型压缩工具"): - gr.Markdown(value=""" - 该工具可以实现对模型的体积压缩,在**不影响模型推理功能**的情况下,将原本约600M的So-VITS模型压缩至约200M, 大大减少了硬盘的压力。 - **注意:压缩后的模型将无法继续训练,请在确认封炉后再压缩。** - """) - model_to_compress = gr.File(label="模型上传") - compress_model_btn = gr.Button("压缩模型", variant="primary") - compress_model_output = gr.Textbox(label="输出信息", value="") - - compress_model_btn.click(model_compression, [model_to_compress], [compress_model_output]) - - - with gr.Tabs(): - with gr.Row(variant="panel"): - with gr.Column(): - gr.Markdown(value=""" - WebUI设置 - """) - debug_button = gr.Checkbox(label="Debug模式,如果向社区反馈BUG需要打开,打开后控制台可以显示具体错误提示", value=debug) - vc_submit.click(vc_fn, [sid, vc_input3, vc_transform,auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,f0_predictor,enhancer_adaptive_key,cr_threshold,k_step,use_spk_mix,second_encoding,loudness_envelope_adjustment], [vc_output1, vc_output2]) - vc_submit2.click(vc_fn2, [sid, vc_input3, vc_transform,auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,text2tts,tts_rate,tts_voice,f0_predictor,enhancer_adaptive_key,cr_threshold], [vc_output1, vc_output2]) - debug_button.change(debug_change,[],[]) - model_load_button.click(modelAnalysis,[model_path,config_path,cluster_model_path,device,enhance,diff_model_path,diff_config_path,only_diffusion,use_spk_mix],[sid,sid_output]) - 
model_unload_button.click(modelUnload,[],[sid,sid_output]) - app.launch() - - - diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/data/build.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/data/build.py deleted file mode 100644 index a31369d1693f86154a7a9249fc043d49f3e9f390..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/data/build.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import itertools -import logging -import numpy as np -import operator -import pickle -from typing import Any, Callable, Dict, List, Optional, Union -import torch -import torch.utils.data as torchdata -from tabulate import tabulate -from termcolor import colored - -from detectron2.config import configurable -from detectron2.structures import BoxMode -from detectron2.utils.comm import get_world_size -from detectron2.utils.env import seed_all_rng -from detectron2.utils.file_io import PathManager -from detectron2.utils.logger import _log_api_usage, log_first_n - -from .catalog import DatasetCatalog, MetadataCatalog -from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset -from .dataset_mapper import DatasetMapper -from .detection_utils import check_metadata_consistency -from .samplers import ( - InferenceSampler, - RandomSubsetTrainingSampler, - RepeatFactorTrainingSampler, - TrainingSampler, -) - -""" -This file contains the default logic to build a dataloader for training or testing. -""" - -__all__ = [ - "build_batch_data_loader", - "build_detection_train_loader", - "build_detection_test_loader", - "get_detection_dataset_dicts", - "load_proposals_into_dataset", - "print_instances_class_histogram", -] - - -def filter_images_with_only_crowd_annotations(dataset_dicts): - """ - Filter out images with none annotations or only crowd annotations - (i.e., images without non-crowd annotations). - A common training-time preprocessing on COCO dataset. - - Args: - dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. - - Returns: - list[dict]: the same format, but filtered. - """ - num_before = len(dataset_dicts) - - def valid(anns): - for ann in anns: - if ann.get("iscrowd", 0) == 0: - return True - return False - - dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] - num_after = len(dataset_dicts) - logger = logging.getLogger(__name__) - logger.info( - "Removed {} images with no usable annotations. {} images left.".format( - num_before - num_after, num_after - ) - ) - return dataset_dicts - - -def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): - """ - Filter out images with too few number of keypoints. - - Args: - dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. - - Returns: - list[dict]: the same format as dataset_dicts, but filtered. 
- """ - num_before = len(dataset_dicts) - - def visible_keypoints_in_image(dic): - # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility - annotations = dic["annotations"] - return sum( - (np.array(ann["keypoints"][2::3]) > 0).sum() - for ann in annotations - if "keypoints" in ann - ) - - dataset_dicts = [ - x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image - ] - num_after = len(dataset_dicts) - logger = logging.getLogger(__name__) - logger.info( - "Removed {} images with fewer than {} keypoints.".format( - num_before - num_after, min_keypoints_per_image - ) - ) - return dataset_dicts - - -def load_proposals_into_dataset(dataset_dicts, proposal_file): - """ - Load precomputed object proposals into the dataset. - - The proposal file should be a pickled dict with the following keys: - - - "ids": list[int] or list[str], the image ids - - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores - corresponding to the boxes. - - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. - - Args: - dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. - proposal_file (str): file path of pre-computed proposals, in pkl format. - - Returns: - list[dict]: the same format as dataset_dicts, but added proposal field. - """ - logger = logging.getLogger(__name__) - logger.info("Loading proposals from: {}".format(proposal_file)) - - with PathManager.open(proposal_file, "rb") as f: - proposals = pickle.load(f, encoding="latin1") - - # Rename the key names in D1 proposal files - rename_keys = {"indexes": "ids", "scores": "objectness_logits"} - for key in rename_keys: - if key in proposals: - proposals[rename_keys[key]] = proposals.pop(key) - - # Fetch the indexes of all proposals that are in the dataset - # Convert image_id to str since they could be int. - img_ids = set({str(record["image_id"]) for record in dataset_dicts}) - id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} - - # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' - bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS - - for record in dataset_dicts: - # Get the index of the proposal - i = id_to_index[str(record["image_id"])] - - boxes = proposals["boxes"][i] - objectness_logits = proposals["objectness_logits"][i] - # Sort the proposals in descending order of the scores - inds = objectness_logits.argsort()[::-1] - record["proposal_boxes"] = boxes[inds] - record["proposal_objectness_logits"] = objectness_logits[inds] - record["proposal_bbox_mode"] = bbox_mode - - return dataset_dicts - - -def print_instances_class_histogram(dataset_dicts, class_names): - """ - Args: - dataset_dicts (list[dict]): list of dataset dicts. - class_names (list[str]): list of class names (zero-indexed). 
- """ - num_classes = len(class_names) - hist_bins = np.arange(num_classes + 1) - histogram = np.zeros((num_classes,), dtype=np.int) - for entry in dataset_dicts: - annos = entry["annotations"] - classes = np.asarray( - [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=np.int - ) - if len(classes): - assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" - assert ( - classes.max() < num_classes - ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" - histogram += np.histogram(classes, bins=hist_bins)[0] - - N_COLS = min(6, len(class_names) * 2) - - def short_name(x): - # make long class names shorter. useful for lvis - if len(x) > 13: - return x[:11] + ".." - return x - - data = list( - itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) - ) - total_num_instances = sum(data[1::2]) - data.extend([None] * (N_COLS - (len(data) % N_COLS))) - if num_classes > 1: - data.extend(["total", total_num_instances]) - data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) - table = tabulate( - data, - headers=["category", "#instances"] * (N_COLS // 2), - tablefmt="pipe", - numalign="left", - stralign="center", - ) - log_first_n( - logging.INFO, - "Distribution of instances among all {} categories:\n".format(num_classes) - + colored(table, "cyan"), - key="message", - ) - - -def get_detection_dataset_dicts( - names, - filter_empty=True, - min_keypoints=0, - proposal_files=None, - check_consistency=True, -): - """ - Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. - - Args: - names (str or list[str]): a dataset name or a list of dataset names - filter_empty (bool): whether to filter out images without instance annotations - min_keypoints (int): filter out images with fewer keypoints than - `min_keypoints`. Set to 0 to do nothing. - proposal_files (list[str]): if given, a list of object proposal files - that match each dataset in `names`. - check_consistency (bool): whether to check if datasets have consistent metadata. - - Returns: - list[dict]: a list of dicts following the standard dataset dict format. 
- """ - if isinstance(names, str): - names = [names] - assert len(names), names - dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] - for dataset_name, dicts in zip(names, dataset_dicts): - assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) - - if proposal_files is not None: - assert len(names) == len(proposal_files) - # load precomputed proposals from proposal files - dataset_dicts = [ - load_proposals_into_dataset(dataset_i_dicts, proposal_file) - for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) - ] - - if isinstance(dataset_dicts[0], torchdata.Dataset): - return torchdata.ConcatDataset(dataset_dicts) - - dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) - - has_instances = "annotations" in dataset_dicts[0] - if filter_empty and has_instances: - dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) - if min_keypoints > 0 and has_instances: - dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) - - if check_consistency and has_instances: - try: - class_names = MetadataCatalog.get(names[0]).thing_classes - check_metadata_consistency("thing_classes", names) - print_instances_class_histogram(dataset_dicts, class_names) - except AttributeError: # class names are not available for this dataset - pass - - assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) - return dataset_dicts - - -def build_batch_data_loader( - dataset, - sampler, - total_batch_size, - *, - aspect_ratio_grouping=False, - num_workers=0, - collate_fn=None, -): - """ - Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: - 1. support aspect ratio grouping options - 2. use no "batch collation", because this is common for detection training - - Args: - dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. - sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. - Must be provided iff. ``dataset`` is a map-style dataset. - total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see - :func:`build_detection_train_loader`. - - Returns: - iterable[list]. Length of each list is the batch size of the current - GPU. Each element in the list comes from the dataset. 
- """ - world_size = get_world_size() - assert ( - total_batch_size > 0 and total_batch_size % world_size == 0 - ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( - total_batch_size, world_size - ) - batch_size = total_batch_size // world_size - - if isinstance(dataset, torchdata.IterableDataset): - assert sampler is None, "sampler must be None if dataset is IterableDataset" - else: - dataset = ToIterableDataset(dataset, sampler) - - if aspect_ratio_grouping: - data_loader = torchdata.DataLoader( - dataset, - num_workers=num_workers, - collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements - worker_init_fn=worker_init_reset_seed, - ) # yield individual mapped dict - data_loader = AspectRatioGroupedDataset(data_loader, batch_size) - if collate_fn is None: - return data_loader - return MapDataset(data_loader, collate_fn) - else: - return torchdata.DataLoader( - dataset, - batch_size=batch_size, - drop_last=True, - num_workers=num_workers, - collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, - worker_init_fn=worker_init_reset_seed, - ) - - -def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): - if dataset is None: - dataset = get_detection_dataset_dicts( - cfg.DATASETS.TRAIN, - filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON - else 0, - proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, - ) - _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0]) - - if mapper is None: - mapper = DatasetMapper(cfg, True) - - if sampler is None: - sampler_name = cfg.DATALOADER.SAMPLER_TRAIN - logger = logging.getLogger(__name__) - logger.info("Using training sampler {}".format(sampler_name)) - if sampler_name == "TrainingSampler": - sampler = TrainingSampler(len(dataset)) - elif sampler_name == "RepeatFactorTrainingSampler": - repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( - dataset, cfg.DATALOADER.REPEAT_THRESHOLD - ) - sampler = RepeatFactorTrainingSampler(repeat_factors) - elif sampler_name == "RandomSubsetTrainingSampler": - sampler = RandomSubsetTrainingSampler(len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO) - else: - raise ValueError("Unknown training sampler: {}".format(sampler_name)) - - return { - "dataset": dataset, - "sampler": sampler, - "mapper": mapper, - "total_batch_size": cfg.SOLVER.IMS_PER_BATCH, - "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING, - "num_workers": cfg.DATALOADER.NUM_WORKERS, - } - - -@configurable(from_config=_train_loader_from_config) -def build_detection_train_loader( - dataset, - *, - mapper, - sampler=None, - total_batch_size, - aspect_ratio_grouping=True, - num_workers=0, - collate_fn=None, -): - """ - Build a dataloader for object detection with some default features. - - Args: - dataset (list or torch.utils.data.Dataset): a list of dataset dicts, - or a pytorch dataset (either map-style or iterable). It can be obtained - by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. - mapper (callable): a callable which takes a sample (dict) from dataset and - returns the format to be consumed by the model. - When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``. - sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces - indices to be applied on ``dataset``. 
- If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`, - which coordinates an infinite random shuffle sequence across all workers. - Sampler must be None if ``dataset`` is iterable. - total_batch_size (int): total batch size across all workers. - aspect_ratio_grouping (bool): whether to group images with similar - aspect ratio for efficiency. When enabled, it requires each - element in dataset be a dict with keys "width" and "height". - num_workers (int): number of parallel data loading workers - collate_fn: a function that determines how to do batching, same as the argument of - `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of - data. No collation is OK for small batch size and simple data structures. - If your batch size is large and each sample contains too many small tensors, - it's more efficient to collate them in data loader. - - Returns: - torch.utils.data.DataLoader: - a dataloader. Each output from it is a ``list[mapped_element]`` of length - ``total_batch_size / num_workers``, where ``mapped_element`` is produced - by the ``mapper``. - """ - if isinstance(dataset, list): - dataset = DatasetFromList(dataset, copy=False) - if mapper is not None: - dataset = MapDataset(dataset, mapper) - - if isinstance(dataset, torchdata.IterableDataset): - assert sampler is None, "sampler must be None if dataset is IterableDataset" - else: - if sampler is None: - sampler = TrainingSampler(len(dataset)) - assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}" - return build_batch_data_loader( - dataset, - sampler, - total_batch_size, - aspect_ratio_grouping=aspect_ratio_grouping, - num_workers=num_workers, - collate_fn=collate_fn, - ) - - -def _test_loader_from_config(cfg, dataset_name, mapper=None): - """ - Uses the given `dataset_name` argument (instead of the names in cfg), because the - standard practice is to evaluate each test set individually (not combining them). - """ - if isinstance(dataset_name, str): - dataset_name = [dataset_name] - - dataset = get_detection_dataset_dicts( - dataset_name, - filter_empty=False, - proposal_files=[ - cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name - ] - if cfg.MODEL.LOAD_PROPOSALS - else None, - ) - if mapper is None: - mapper = DatasetMapper(cfg, False) - return { - "dataset": dataset, - "mapper": mapper, - "num_workers": cfg.DATALOADER.NUM_WORKERS, - "sampler": InferenceSampler(len(dataset)), - } - - -@configurable(from_config=_test_loader_from_config) -def build_detection_test_loader( - dataset: Union[List[Any], torchdata.Dataset], - *, - mapper: Callable[[Dict[str, Any]], Any], - sampler: Optional[torchdata.Sampler] = None, - batch_size: int = 1, - num_workers: int = 0, - collate_fn: Optional[Callable[[List[Any]], Any]] = None, -) -> torchdata.DataLoader: - """ - Similar to `build_detection_train_loader`, with default batch size = 1, - and sampler = :class:`InferenceSampler`. This sampler coordinates all workers - to produce the exact set of all samples. - - Args: - dataset: a list of dataset dicts, - or a pytorch dataset (either map-style or iterable). They can be obtained - by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. - mapper: a callable which takes a sample (dict) from dataset - and returns the format to be consumed by the model. - When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``. - sampler: a sampler that produces - indices to be applied on ``dataset``. 
Default to :class:`InferenceSampler`, - which splits the dataset across all workers. Sampler must be None - if `dataset` is iterable. - batch_size: the batch size of the data loader to be created. - Default to 1 image per worker since this is the standard when reporting - inference time in papers. - num_workers: number of parallel data loading workers - collate_fn: same as the argument of `torch.utils.data.DataLoader`. - Defaults to do no collation and return a list of data. - - Returns: - DataLoader: a torch DataLoader, that loads the given detection - dataset, with test-time transformation and batching. - - Examples: - :: - data_loader = build_detection_test_loader( - DatasetRegistry.get("my_test"), - mapper=DatasetMapper(...)) - - # or, instantiate with a CfgNode: - data_loader = build_detection_test_loader(cfg, "my_test") - """ - if isinstance(dataset, list): - dataset = DatasetFromList(dataset, copy=False) - if mapper is not None: - dataset = MapDataset(dataset, mapper) - if isinstance(dataset, torchdata.IterableDataset): - assert sampler is None, "sampler must be None if dataset is IterableDataset" - else: - if sampler is None: - sampler = InferenceSampler(len(dataset)) - return torchdata.DataLoader( - dataset, - batch_size=batch_size, - sampler=sampler, - drop_last=False, - num_workers=num_workers, - collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, - ) - - -def trivial_batch_collator(batch): - """ - A batch collator that does nothing. - """ - return batch - - -def worker_init_reset_seed(worker_id): - initial_seed = torch.initial_seed() % 2 ** 31 - seed_all_rng(initial_seed + worker_id) diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/export/caffe2_inference.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/export/caffe2_inference.py deleted file mode 100644 index deb886c0417285ed1d5ad85eb941fa1ac757cdab..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/export/caffe2_inference.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import logging -import numpy as np -from itertools import count -import torch -from caffe2.proto import caffe2_pb2 -from caffe2.python import core - -from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format -from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type - -logger = logging.getLogger(__name__) - - -# ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ====== -class ProtobufModel(torch.nn.Module): - """ - Wrapper of a caffe2's protobuf model. - It works just like nn.Module, but running caffe2 under the hood. - Input/Output are tuple[tensor] that match the caffe2 net's external_input/output. 
- """ - - _ids = count(0) - - def __init__(self, predict_net, init_net): - logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...") - super().__init__() - assert isinstance(predict_net, caffe2_pb2.NetDef) - assert isinstance(init_net, caffe2_pb2.NetDef) - # create unique temporary workspace for each instance - self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids)) - self.net = core.Net(predict_net) - - logger.info("Running init_net once to fill the parameters ...") - with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws: - ws.RunNetOnce(init_net) - uninitialized_external_input = [] - for blob in self.net.Proto().external_input: - if blob not in ws.Blobs(): - uninitialized_external_input.append(blob) - ws.CreateBlob(blob) - ws.CreateNet(self.net) - - self._error_msgs = set() - self._input_blobs = uninitialized_external_input - - def _infer_output_devices(self, inputs): - """ - Returns: - list[str]: list of device for each external output - """ - - def _get_device_type(torch_tensor): - assert torch_tensor.device.type in ["cpu", "cuda"] - assert torch_tensor.device.index == 0 - return torch_tensor.device.type - - predict_net = self.net.Proto() - input_device_types = { - (name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs) - } - device_type_map = infer_device_type( - predict_net, known_status=input_device_types, device_name_style="pytorch" - ) - ssa, versions = core.get_ssa(predict_net) - versioned_outputs = [(name, versions[name]) for name in predict_net.external_output] - output_devices = [device_type_map[outp] for outp in versioned_outputs] - return output_devices - - def forward(self, inputs): - """ - Args: - inputs (tuple[torch.Tensor]) - - Returns: - tuple[torch.Tensor] - """ - assert len(inputs) == len(self._input_blobs), ( - f"Length of inputs ({len(inputs)}) " - f"doesn't match the required input blobs: {self._input_blobs}" - ) - - with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws: - for b, tensor in zip(self._input_blobs, inputs): - ws.FeedBlob(b, tensor) - - try: - ws.RunNet(self.net.Proto().name) - except RuntimeError as e: - if not str(e) in self._error_msgs: - self._error_msgs.add(str(e)) - logger.warning("Encountered new RuntimeError: \n{}".format(str(e))) - logger.warning("Catch the error and use partial results.") - - c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output] - # Remove outputs of current run, this is necessary in order to - # prevent fetching the result from previous run if the model fails - # in the middle. - for b in self.net.Proto().external_output: - # Needs to create uninitialized blob to make the net runable. - # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b), - # but there'no such API. 
- ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).") - - # Cast output to torch.Tensor on the desired device - output_devices = ( - self._infer_output_devices(inputs) - if any(t.device.type != "cpu" for t in inputs) - else ["cpu" for _ in self.net.Proto().external_output] - ) - - outputs = [] - for name, c2_output, device in zip( - self.net.Proto().external_output, c2_outputs, output_devices - ): - if not isinstance(c2_output, np.ndarray): - raise RuntimeError( - "Invalid output for blob {}, received: {}".format(name, c2_output) - ) - outputs.append(torch.tensor(c2_output).to(device=device)) - return tuple(outputs) - - -class ProtobufDetectionModel(torch.nn.Module): - """ - A class works just like a pytorch meta arch in terms of inference, but running - caffe2 model under the hood. - """ - - def __init__(self, predict_net, init_net, *, convert_outputs=None): - """ - Args: - predict_net, init_net (core.Net): caffe2 nets - convert_outptus (callable): a function that converts caffe2 - outputs to the same format of the original pytorch model. - By default, use the one defined in the caffe2 meta_arch. - """ - super().__init__() - self.protobuf_model = ProtobufModel(predict_net, init_net) - self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0) - self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii") - - if convert_outputs is None: - meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN") - meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")] - self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net) - else: - self._convert_outputs = convert_outputs - - def _convert_inputs(self, batched_inputs): - # currently all models convert inputs in the same way - return convert_batched_inputs_to_c2_format( - batched_inputs, self.size_divisibility, self.device - ) - - def forward(self, batched_inputs): - c2_inputs = self._convert_inputs(batched_inputs) - c2_results = self.protobuf_model(c2_inputs) - c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results)) - return self._convert_outputs(batched_inputs, c2_inputs, c2_results) diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/mask_ops.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/mask_ops.py deleted file mode 100644 index e7a9f3a323ddbe75845b668ee6b40c5385d206c3..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/mask_ops.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import numpy as np -from typing import Tuple -import torch -from PIL import Image -from torch.nn import functional as F - -__all__ = ["paste_masks_in_image"] - - -BYTES_PER_FLOAT = 4 -# TODO: This memory limit may be too much or too little. It would be better to -# determine it based on available resources. -GPU_MEM_LIMIT = 1024 ** 3 # 1 GB memory limit - - -def _do_paste_mask(masks, boxes, img_h: int, img_w: int, skip_empty: bool = True): - """ - Args: - masks: N, 1, H, W - boxes: N, 4 - img_h, img_w (int): - skip_empty (bool): only paste masks within the region that - tightly bound all boxes, and returns the results this region only. - An important optimization for CPU. 
- - Returns: - if skip_empty == False, a mask of shape (N, img_h, img_w) - if skip_empty == True, a mask of shape (N, h', w'), and the slice - object for the corresponding region. - """ - # On GPU, paste all masks together (up to chunk size) - # by using the entire image to sample the masks - # Compared to pasting them one by one, - # this has more operations but is faster on COCO-scale dataset. - device = masks.device - - if skip_empty and not torch.jit.is_scripting(): - x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to( - dtype=torch.int32 - ) - x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) - y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) - else: - x0_int, y0_int = 0, 0 - x1_int, y1_int = img_w, img_h - x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 - - N = masks.shape[0] - - img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 - img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 - img_y = (img_y - y0) / (y1 - y0) * 2 - 1 - img_x = (img_x - x0) / (x1 - x0) * 2 - 1 - # img_x, img_y have shapes (N, w), (N, h) - - gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) - gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) - grid = torch.stack([gx, gy], dim=3) - - if not torch.jit.is_scripting(): - if not masks.dtype.is_floating_point: - masks = masks.float() - img_masks = F.grid_sample(masks, grid.to(masks.dtype), align_corners=False) - - if skip_empty and not torch.jit.is_scripting(): - return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) - else: - return img_masks[:, 0], () - - -# Annotate boxes as Tensor (but not Boxes) in order to use scripting -@torch.jit.script_if_tracing -def paste_masks_in_image( - masks: torch.Tensor, boxes: torch.Tensor, image_shape: Tuple[int, int], threshold: float = 0.5 -): - """ - Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image. - The location, height, and width for pasting each mask is determined by their - corresponding bounding boxes in boxes. - - Note: - This is a complicated but more accurate implementation. In actual deployment, it is - often enough to use a faster but less accurate implementation. - See :func:`paste_mask_in_image_old` in this file for an alternative implementation. - - Args: - masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of - detected object instances in the image and Hmask, Wmask are the mask width and mask - height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1]. - boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4). - boxes[i] and masks[i] correspond to the same object instance. - image_shape (tuple): height, width - threshold (float): A threshold in [0, 1] for converting the (soft) masks to - binary masks. - - Returns: - img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the - number of detected object instances and Himage, Wimage are the image width - and height. img_masks[i] is a binary mask for object instance i. 
- """ - - assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported" - N = len(masks) - if N == 0: - return masks.new_empty((0,) + image_shape, dtype=torch.uint8) - if not isinstance(boxes, torch.Tensor): - boxes = boxes.tensor - device = boxes.device - assert len(boxes) == N, boxes.shape - - img_h, img_w = image_shape - - # The actual implementation split the input into chunks, - # and paste them chunk by chunk. - if device.type == "cpu" or torch.jit.is_scripting(): - # CPU is most efficient when they are pasted one by one with skip_empty=True - # so that it performs minimal number of operations. - num_chunks = N - else: - # GPU benefits from parallelism for larger chunks, but may have memory issue - # int(img_h) because shape may be tensors in tracing - num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) - assert ( - num_chunks <= N - ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it" - chunks = torch.chunk(torch.arange(N, device=device), num_chunks) - - img_masks = torch.zeros( - N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8 - ) - for inds in chunks: - masks_chunk, spatial_inds = _do_paste_mask( - masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu" - ) - - if threshold >= 0: - masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) - else: - # for visualization and debugging - masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) - - if torch.jit.is_scripting(): # Scripting does not use the optimized codepath - img_masks[inds] = masks_chunk - else: - img_masks[(inds,) + spatial_inds] = masks_chunk - return img_masks - - -# The below are the original paste function (from Detectron1) which has -# larger quantization error. -# It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample. - - -def paste_mask_in_image_old(mask, box, img_h, img_w, threshold): - """ - Paste a single mask in an image. - This is a per-box implementation of :func:`paste_masks_in_image`. - This function has larger quantization error due to incorrect pixel - modeling and is not used any more. - - Args: - mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single - object instance. Values are in [0, 1]. - box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners - of the object instance. - img_h, img_w (int): Image height and width. - threshold (float): Mask binarization threshold in [0, 1]. - - Returns: - im_mask (Tensor): - The resized and binarized object mask pasted into the original - image plane (a tensor of shape (img_h, img_w)). - """ - # Conversion from continuous box coordinates to discrete pixel coordinates - # via truncation (cast to int32). This determines which pixels to paste the - # mask onto. - box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion - # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to - # a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1 - # pixels (not x1 - x0 pixels). 
- samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width - samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height - - # Resample the mask from it's original grid to the new samples_w x samples_h grid - mask = Image.fromarray(mask.cpu().numpy()) - mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR) - mask = np.array(mask, copy=False) - - if threshold >= 0: - mask = np.array(mask > threshold, dtype=np.uint8) - mask = torch.from_numpy(mask) - else: - # for visualization and debugging, we also - # allow it to return an unmodified mask - mask = torch.from_numpy(mask * 255).to(torch.uint8) - - im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8) - x_0 = max(box[0], 0) - x_1 = min(box[2] + 1, img_w) - y_0 = max(box[1], 0) - y_1 = min(box[3] + 1, img_h) - - im_mask[y_0:y_1, x_0:x_1] = mask[ - (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0]) - ] - return im_mask - - -# Our pixel modeling requires extrapolation for any continuous -# coordinate < 0.5 or > length - 0.5. When sampling pixels on the masks, -# we would like this extrapolation to be an interpolation between boundary values and zero, -# instead of using absolute zero or boundary values. -# Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this: -# masks, scale = pad_masks(masks[:, 0, :, :], 1) -# boxes = scale_boxes(boxes.tensor, scale) - - -def pad_masks(masks, padding): - """ - Args: - masks (tensor): A tensor of shape (B, M, M) representing B masks. - padding (int): Number of cells to pad on all sides. - - Returns: - The padded masks and the scale factor of the padding size / original size. - """ - B = masks.shape[0] - M = masks.shape[-1] - pad2 = 2 * padding - scale = float(M + pad2) / M - padded_masks = masks.new_zeros((B, M + pad2, M + pad2)) - padded_masks[:, padding:-padding, padding:-padding] = masks - return padded_masks, scale - - -def scale_boxes(boxes, scale): - """ - Args: - boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4 - coords representing the corners x0, y0, x1, y1, - scale (float): The box scaling factor. - - Returns: - Scaled boxes. - """ - w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 - h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 - x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 - y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 - - w_half *= scale - h_half *= scale - - scaled_boxes = torch.zeros_like(boxes) - scaled_boxes[:, 0] = x_c - w_half - scaled_boxes[:, 2] = x_c + w_half - scaled_boxes[:, 1] = y_c - h_half - scaled_boxes[:, 3] = y_c + h_half - return scaled_boxes - - -@torch.jit.script_if_tracing -def _paste_masks_tensor_shape( - masks: torch.Tensor, - boxes: torch.Tensor, - image_shape: Tuple[torch.Tensor, torch.Tensor], - threshold: float = 0.5, -): - """ - A wrapper of paste_masks_in_image where image_shape is Tensor. - During tracing, shapes might be tensors instead of ints. The Tensor->int - conversion should be scripted rather than traced. 
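As the comment above notes, `pad_masks` and `scale_boxes` are intended to be used as a pair: padding each M x M mask by one cell only keeps the mask/box correspondence if the box is grown about its center by the same ratio. A short usage sketch, assuming the functions defined above are in scope (shapes are illustrative):

```python
import torch

masks = torch.rand(8, 28, 28)                      # 8 soft masks, 28 x 28 each
boxes = torch.tensor([[10.0, 20.0, 50.0, 90.0]]).repeat(8, 1)

padded, scale = pad_masks(masks, padding=1)        # -> (8, 30, 30), scale = 30 / 28
scaled_boxes = scale_boxes(boxes, scale)           # each box grown about its center

# The padded border now maps to the thin strip just outside the original box, so
# paste_mask_in_image_old(padded[i], scaled_boxes[i], img_h, img_w, 0.5) interpolates
# toward zero at the box edge instead of cutting off abruptly.
```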
- """ - return paste_masks_in_image(masks, boxes, (int(image_shape[0]), int(image_shape[1])), threshold) diff --git a/spaces/ysharma/OSChatbots_ChatGPT_ToeToToe/README.md b/spaces/ysharma/OSChatbots_ChatGPT_ToeToToe/README.md deleted file mode 100644 index 063daee6fdabcc9c2ba69522bf68ccc2c3c8f9d4..0000000000000000000000000000000000000000 --- a/spaces/ysharma/OSChatbots_ChatGPT_ToeToToe/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: OSChatbots ChatGPT ToeToToe -emoji: 🏢 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yuan1615/EmpathyTTS/app.py b/spaces/yuan1615/EmpathyTTS/app.py deleted file mode 100644 index 9c9be92a9086ee1d786130c7f1923ef3f00dfbd0..0000000000000000000000000000000000000000 --- a/spaces/yuan1615/EmpathyTTS/app.py +++ /dev/null @@ -1,80 +0,0 @@ -import argparse -from models import SynthesizerTrn -from text.symbols import symbols -import utils -import gradio as gr -import tempfile -from scipy.io import wavfile -import numpy as np -import re -from synthesize_fastapi import * -config = "./configs/baker_base.json" - -import subprocess -def install(name): - subprocess.call(['pip', 'install', 'packages/ttsfrd-0.0.4-cp38-cp38-linux_x86_64.whl']) -install('ttsfrd') - -import ttsfrd - -print("---------- Loading VITS Model ----------") -hps = utils.get_hparams_from_file(config) -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model) -_ = net_g.eval() - -_ = utils.load_checkpoint("./ckpt/pretrained_baker.pth", net_g, None) - -lexicon_mandarin = read_lexicon("./lexicon/pinyin-lexicon-r.txt") - -# 解压文件 -import zipfile -f = zipfile.ZipFile("./resource.zip", 'r') # 压缩文件位置 -for file in f.namelist(): - f.extract(file, "./") # 解压位置 -f.close() - -frontend = ttsfrd.TtsFrontendEngine() -model_dir = './resource' -frontend.initialize(model_dir) -frontend.set_lang_type('zhcn') - - -def tts(text): - audio_all = np.zeros(1, dtype=np.int16) # 设置初始音频 - # 删除英文 - text = re.sub('[a-zA-Z]', '', text) - text = text.replace(',[object Object]', '') - pinyin_list, prosody_list = g2p_mandarin(frontend, text) - for texts, phone_prosody in zip(pinyin_list, prosody_list): - print(texts) - print(phone_prosody) - stn_tst = get_text(texts, hps) - prosody = get_prosody(phone_prosody, hps) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0) - prosody = prosody.unsqueeze(0) - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]) - audio = net_g.infer(x_tst, x_tst_lengths, prosody, noise_scale=.667, noise_scale_w=0.0, length_scale=1)[0][ - 0, 0].data.cpu().float().numpy() * 32767.0 - i = np.random.uniform(0.12, 0.35, 1)[0] - space_time = np.zeros(int(i * 22050), dtype=np.int16) - audio_all = np.concatenate((audio_all, audio, space_time)) - with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp: - wavfile.write( - fp.name, - 22050, - audio_all.astype(np.int16), - ) - return fp.name - - -inputs = [gr.inputs.Textbox(label="Input Text", default='国务院银行业监督管理机构会按照国家法律法规规定的程序处理', lines=6)] -outputs = gr.Audio(label="Output") -interface = gr.Interface(fn=tts, inputs=inputs, outputs=outputs, - title='Empathy-TTS', - description='Note: This space is running on CPU, inference times will be higher.') -interface.launch(server_name='0.0.0.0') diff --git a/spaces/yuezih/BLIP-SMILE/SMILE/BLIP/train_caption.py 
b/spaces/yuezih/BLIP-SMILE/SMILE/BLIP/train_caption.py deleted file mode 100644 index efdf7bb052c43af2c8fab5e8b72f974534fbdfbc..0000000000000000000000000000000000000000 --- a/spaces/yuezih/BLIP-SMILE/SMILE/BLIP/train_caption.py +++ /dev/null @@ -1,221 +0,0 @@ -''' - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. - * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li - - * Modified by Zihao Yue -''' - -import argparse -import os -try: - import ruamel_yaml as yaml -except: - import ruamel.yaml as yaml -import numpy as np -import random -import time -import datetime -import json -from pathlib import Path - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.backends.cudnn as cudnn -import torch.distributed as dist -from torch.utils.data import DataLoader - -from models.model import caption_model -import utils -from utils import warmup_lr_schedule, step_lr_schedule, cosine_lr_schedule -from data import create_dataset, create_sampler, create_loader -from data.utils import save_result, coco_caption_eval - -def train(model, data_loader, optimizer, epoch, device): - # train - model.train() - - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) - metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.4f}')) - header = 'Train Caption Epoch: [{}]'.format(epoch) - print_freq = 50 - - for i, (image, caption, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): - image = image.to(device) - - loss = model(image, caption) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - metric_logger.update(loss=loss.item()) - metric_logger.update(lr=optimizer.param_groups[0]["lr"]) - - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger.global_avg()) - return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()} - - -@torch.no_grad() -def evaluate(model, data_loader, device, config): - # evaluate - model.eval() - - metric_logger = utils.MetricLogger(delimiter=" ") - header = 'Caption generation:' - print_freq = 10 - - result = [] - for image, image_id in metric_logger.log_every(data_loader, print_freq, header): - - image = image.to(device) - captions = model.generate(image, sample=False, num_beams=config['num_beams'], max_length=config['max_length'], min_length=config['min_length']) - - for caption, img_id in zip(captions, image_id): - result.append({"image_id": img_id.item(), "caption": caption}) - - return result - - -def main(args, config): - utils.init_distributed_mode(args) - - device = torch.device(args.device) - - # fix the seed for reproducibility - seed = args.seed + utils.get_rank() - torch.manual_seed(seed) - np.random.seed(seed) - random.seed(seed) - cudnn.benchmark = True - - #### Dataset #### - print("Creating captioning dataset") - train_dataset, val_dataset, test_dataset = create_dataset('caption_coco', config) - - if args.distributed: - num_tasks = utils.get_world_size() - global_rank = utils.get_rank() - samplers = create_sampler([train_dataset,val_dataset,test_dataset], [True,False,False], num_tasks, global_rank) - else: - samplers = [None, None, None] - - train_loader, val_loader, test_loader = create_loader([train_dataset, val_dataset, test_dataset],samplers, - 
batch_size=[config['batch_size']]*3,num_workers=[4,4,4], - is_trains=[True, False, False], collate_fns=[None,None,None]) - - #### Model #### - print("Creating model") - model = caption_model(pretrained=config['pretrained'], image_size=config['image_size'], vit=config['vit'], - vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'], - prompt=config['prompt']) - - model = model.to(device) - - model_without_ddp = model - if args.distributed: - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) - model_without_ddp = model.module - - optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay']) - - best = 0 - best_epoch = 0 - - print("Start training") - start_time = time.time() - for epoch in range(0, config['max_epoch']): - if not args.evaluate: - if args.distributed: - train_loader.sampler.set_epoch(epoch) - - cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr']) - - train_stats = train(model, train_loader, optimizer, epoch, device) - - if args.eval_split == 'val' or not args.evaluate: - val_result = evaluate(model_without_ddp, val_loader, device, config) - val_result_file = save_result(val_result, args.result_dir, 'val_epoch%d'%epoch, remove_duplicate='image_id') - else: - test_result = evaluate(model_without_ddp, test_loader, device, config) - test_result_file = save_result(test_result, args.result_dir, 'test_epoch%d'%epoch, remove_duplicate='image_id') - - if utils.is_main_process(): - - if args.eval_split == 'val' or not args.evaluate: - coco_val = coco_caption_eval(config['coco_gt_root'],val_result_file,'val') - else: - coco_test = coco_caption_eval(config['coco_gt_root'],test_result_file,'test') - - if args.evaluate: - if args.eval_split == 'val': - log_stats = { - **{f'val_{k}': v for k, v in coco_val.eval.items()}, - } - else: - log_stats = { - **{f'test_{k}': v for k, v in coco_test.eval.items()}, - } - with open(os.path.join(args.output_dir, "evaluate.txt"),"a") as f: - f.write(json.dumps(log_stats) + "\n") - else: - save_obj = { - 'model': model_without_ddp.state_dict(), - 'optimizer': optimizer.state_dict(), - 'config': config, - 'epoch': epoch, - } - - if coco_val.eval['CIDEr'] + coco_val.eval['Bleu_4'] > best: - best = coco_val.eval['CIDEr'] + coco_val.eval['Bleu_4'] - best_epoch = epoch - # torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth')) - # save each epoch - torch.save(save_obj, os.path.join(args.output_dir, 'epoch%d.pth'%epoch)) - - log_stats = {**{f'train_{k}': float(v) for k, v in train_stats.items()}, - **{f'val_{k}': v for k, v in coco_val.eval.items()}, - 'epoch': epoch, - 'best_epoch': best_epoch, - } - with open(os.path.join(args.output_dir, "log.txt"),"a") as f: - f.write(json.dumps(log_stats) + "\n") - - if args.evaluate: - break - dist.barrier() - - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('Training time {}'.format(total_time_str)) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--config', default='./configs/caption_coco.yaml') - parser.add_argument('--output_dir', default='output/caption_coco') - parser.add_argument('--evaluate', action='store_true') - parser.add_argument('--device', default='cuda') - parser.add_argument('--seed', default=42, type=int) - parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') - 
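The training loop above adjusts the learning rate once per epoch with `cosine_lr_schedule`, which is imported from `utils` and not included in this diff. A standard per-epoch cosine decay from `init_lr` to `min_lr` looks roughly like the sketch below (an assumption about that helper, not necessarily its exact implementation):

```python
import math

def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
    """Anneal every param group from init_lr (epoch 0) toward min_lr (epoch max_epoch)."""
    lr = min_lr + 0.5 * (init_lr - min_lr) * (1.0 + math.cos(math.pi * epoch / max_epoch))
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
    # Called once at the top of each epoch, with epoch in range(max_epoch).
```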
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') - parser.add_argument('--distributed', default=True, type=bool) - parser.add_argument('--eval_split', default='val', type=str) - args = parser.parse_args() - - config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader) - - args.result_dir = os.path.join(args.output_dir, 'result') - - Path(args.output_dir).mkdir(parents=True, exist_ok=True) - Path(args.result_dir).mkdir(parents=True, exist_ok=True) - - yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w')) - - main(args, config) \ No newline at end of file diff --git a/spaces/z-uo/streamlit_music_demo/app.py b/spaces/z-uo/streamlit_music_demo/app.py deleted file mode 100644 index 64beee86a5c60aec8301814f73f542e6ae1098a9..0000000000000000000000000000000000000000 --- a/spaces/z-uo/streamlit_music_demo/app.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import io - -import streamlit as st -from PIL import Image - -import pretty_midi -import music21 -import note_seq -from scipy.io import wavfile - -import numpy as np - -st.title('Music Demo') -st.markdown('This example need a .xml music file as input and show partiture, piano roll and a simply wav. You can generate it from MuseScore or similar software.') - -example_1 = st.button('Test Example') - -uploaded_file = st.file_uploader("Choose a .xml music file", type='xml') - -if uploaded_file is not None or example_1: - if uploaded_file is not None: - # save file - with open(os.path.join(os.getcwd(), uploaded_file.name), "wb") as f: - f.write(uploaded_file.getbuffer()) - file_path = os.path.join(os.getcwd(), uploaded_file.name) - else: - file_path = 'examples/test.xml' - - # Show partiture with music21 - music = music21.converter.parse(file_path) - streaming_partiture = str(music.write('lily.png')) - image = Image.open(streaming_partiture) - st.text('Partiture') - st.image(image) - - # save to mid - midi_path = file_path + '.mid' - music.write('midi', fp=midi_path) - - # plot piano rol - unconditional_ns = note_seq.midi_file_to_note_sequence(midi_path) - piano_roll = note_seq.plot_sequence(unconditional_ns, show_figure=False) - st.text('Piano Roll') - st.bokeh_chart(piano_roll, use_container_width=True) - - # generate wav - with st.spinner(f"Sinthezizing to wav"): - midi_data = pretty_midi.PrettyMIDI(midi_path) - audio_data = midi_data.synthesize() - audio_data = np.int16( - audio_data / np.max(np.abs(audio_data)) * 32767 * 0.9 - ) # -- Normalize for 16 bit audio https://github.com/jkanner/streamlit-audio/blob/main/helper.py - virtualfile = io.BytesIO() - wavfile.write(virtualfile, 44100, audio_data) - - st.audio(virtualfile) - st.markdown("Download the audio by right-clicking on the media player") diff --git a/spaces/zhan66/vits-simple-api/bert_vits2/text/symbols.py b/spaces/zhan66/vits-simple-api/bert_vits2/text/symbols.py deleted file mode 100644 index 0639a3e07665f9fb0e26f7002c811b987f161ba1..0000000000000000000000000000000000000000 --- a/spaces/zhan66/vits-simple-api/bert_vits2/text/symbols.py +++ /dev/null @@ -1,198 +0,0 @@ -punctuation = ["!", "?", "…", ",", ".", "'", "-"] -pu_symbols = punctuation + ["SP", "UNK"] -pad = "_" - -# chinese -zh_symbols = [ - "E", - "En", - "a", - "ai", - "an", - "ang", - "ao", - "b", - "c", - "ch", - "d", - "e", - "ei", - "en", - "eng", - "er", - "f", - "g", - "h", - "i", - "i0", - "ia", - "ian", - "iang", - "iao", - "ie", - "in", - "ing", - "iong", - "ir", - "iu", - "j", - "k", - "l", - "m", - "n", - "o", - "ong", - "ou", - "p", - "q", - 
"r", - "s", - "sh", - "t", - "u", - "ua", - "uai", - "uan", - "uang", - "ui", - "un", - "uo", - "v", - "van", - "ve", - "vn", - "w", - "x", - "y", - "z", - "zh", - "AA", - "EE", - "OO", -] -num_zh_tones = 6 - -# japanese -ja_symbols_legacy = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j', - 'k', 'ky', - 'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z'] -ja_symbols = [ - "N", - "a", - "a:", - "b", - "by", - "ch", - "d", - "dy", - "e", - "e:", - "f", - "g", - "gy", - "h", - "hy", - "i", - "i:", - "j", - "k", - "ky", - "m", - "my", - "n", - "ny", - "o", - "o:", - "p", - "py", - "q", - "r", - "ry", - "s", - "sh", - "t", - "ts", - "ty", - "u", - "u:", - "w", - "y", - "z", - "zy", -] -num_ja_tones = 1 - -# English -en_symbols = [ - "aa", - "ae", - "ah", - "ao", - "aw", - "ay", - "b", - "ch", - "d", - "dh", - "eh", - "er", - "ey", - "f", - "g", - "hh", - "ih", - "iy", - "jh", - "k", - "l", - "m", - "n", - "ng", - "ow", - "oy", - "p", - "r", - "s", - "sh", - "t", - "th", - "uh", - "uw", - "V", - "w", - "y", - "z", - "zh", -] -num_en_tones = 4 - -normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols)) -symbols = [pad] + normal_symbols + pu_symbols -sil_phonemes_ids = [symbols.index(i) for i in pu_symbols] - -# legacy -normal_symbols_legacy = sorted(set(zh_symbols + ja_symbols_legacy + en_symbols)) -symbols_legacy = [pad] + normal_symbols_legacy + pu_symbols -sil_phonemes_ids_legacy = [symbols_legacy.index(i) for i in pu_symbols] - -# combine all tones -num_tones = num_zh_tones + num_ja_tones + num_en_tones - -# language maps -language_id_map = {"zh": 0, "ja": 1, "en": 2} -num_languages = len(language_id_map.keys()) - -language_tone_start_map = { - "zh": 0, - "ja": num_zh_tones, - "en": num_zh_tones + num_ja_tones, -} - -if __name__ == "__main__": - zh = set(zh_symbols) - en = set(en_symbols) - ja = set(ja_symbols) - print(zh) - print(en) - print(ja) - print(sorted(zh & en)) diff --git a/spaces/zhaoys/wfms-kuiwenc/src/components/chat-progress.tsx b/spaces/zhaoys/wfms-kuiwenc/src/components/chat-progress.tsx deleted file mode 100644 index 58101fd7e640b82ea00a54235ad25686dbe36864..0000000000000000000000000000000000000000 --- a/spaces/zhaoys/wfms-kuiwenc/src/components/chat-progress.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import { MemoizedReactMarkdown } from "./markdown" -import { SVG } from "./ui/svg" -import CheckMarkIcon from '@/assets/images/check-mark.svg' - -interface ChatProgressProps { - progress?: string[] -} - -export function ChatProgress({ progress = [] }: ChatProgressProps) { - return progress?.length ? ( -
<div>
-      {progress.map((item, index) => (
-        <div key={index}>
-          <SVG src={CheckMarkIcon} />
-          <MemoizedReactMarkdown>
-            {item}
-          </MemoizedReactMarkdown>
-        </div>
-      ))}
-    </div>
            - ) : null -} diff --git a/spaces/zhuowen999/vits_chinese/modules.py b/spaces/zhuowen999/vits_chinese/modules.py deleted file mode 100644 index 289f4e3bdc7e1c783766b4c20bdf4475e65c932b..0000000000000000000000000000000000000000 --- a/spaces/zhuowen999/vits_chinese/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y 
= self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - 
channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, 
[self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x
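Both `ResidualCouplingLayer` and `ConvFlow` above are invertible couplings: half of the channels pass through untouched and parameterize an elementwise transform of the other half, so inversion only needs the same network outputs applied in reverse. A standalone toy sketch of that invertibility for the affine case (not the classes above):

```python
import torch

def affine_coupling(x, net, reverse=False):
    """Toy affine coupling on (B, C, T) with C even; net maps C//2 -> C channels."""
    x0, x1 = torch.chunk(x, 2, dim=1)
    m, logs = torch.chunk(net(x0), 2, dim=1)   # shift and log-scale from the untouched half
    if not reverse:
        x1 = m + x1 * torch.exp(logs)          # forward direction
    else:
        x1 = (x1 - m) * torch.exp(-logs)       # exact inverse, since x0 (hence m, logs) is unchanged
    return torch.cat([x0, x1], dim=1)

net = torch.nn.Conv1d(2, 4, kernel_size=1)     # stand-in for the WN encoder + post conv
x = torch.randn(1, 4, 10)
y = affine_coupling(x, net)
x_rec = affine_coupling(y, net, reverse=True)
print(torch.allclose(x, x_rec, atol=1e-6))     # True
```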