parquet-converter committed
Commit 9cc3222 · 1 Parent(s): 360a4ad

Update parquet files (step 97 of 121)

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. spaces/0x90e/ESRGAN-MANGA/ESRGANer.py +0 -156
  2. spaces/123aa/pastel-mix/app.py +0 -137
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest] Features and Benefits.md +0 -80
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Stronghold Crusader Trainer v1.0.0.1 for Free and Dominate the Game.md +0 -143
  5. spaces/1gistliPinn/ChatGPT4/Examples/8211759.9137094 Gt E2250 Flash Loader 7.5.4 Csc V0.4 Lite.md +0 -8
  6. spaces/1gistliPinn/ChatGPT4/Examples/Atoll Rf Planning Tool Cracked __TOP__.md +0 -24
  7. spaces/1gistliPinn/ChatGPT4/Examples/CES Edupack 2013.rar.md +0 -31
  8. spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 MOD APK Download - Get Unlimited Money and Unlock All Cars.md +0 -199
  9. spaces/4Taps/SadTalker/src/facerender/modules/dense_motion.py +0 -117
  10. spaces/52Hz/SRMNet_real_world_denoising/model/SRMNet.py +0 -227
  11. spaces/801artistry/RVC801/Applio-RVC-Fork/utils/backups_test.py +0 -138
  12. spaces/A-Celsius/Caption-Generator/app.py +0 -48
  13. spaces/AHzizi/WaifuVoiceGen/commons.py +0 -172
  14. spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/config.py +0 -17
  15. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/contperceptual_dis.py +0 -137
  16. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/factory.py +0 -257
  17. spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/vit.py +0 -395
  18. spaces/Abhaykoul/Merriam-webster_clone/app.py +0 -39
  19. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDemo.py +0 -61
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/box/Factory.d.ts +0 -6
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/buttons/AddChildMethods.js +0 -85
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/flip/Factory.d.ts +0 -7
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/shake/Shake.d.ts +0 -2
  24. spaces/AiMimicry/sovits-models/modules/commons.py +0 -188
  25. spaces/Aki004/herta-so-vits/vdecoder/nsf_hifigan/env.py +0 -15
  26. spaces/AlexReverie/ImageSonification/README.md +0 -12
  27. spaces/Alpaca233/SadTalker/src/face3d/models/base_model.py +0 -316
  28. spaces/Ame42/rwms/datastore.py +0 -252
  29. spaces/Amrrs/DragGan-Inversion/PTI/evaluation/experiment_setting_creator.py +0 -43
  30. spaces/Amrrs/DragGan-Inversion/stylegan_human/PP_HumanSeg/export_model/download_export_model.py +0 -44
  31. spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/conv2d_gradfix.py +0 -196
  32. spaces/Amrrs/QR-code-AI-art-generator/README.md +0 -15
  33. spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py +0 -43
  34. spaces/Andy1621/uniformer_image_detection/configs/yolo/yolov3_d53_320_273e_coco.py +0 -42
  35. spaces/Anonymous-sub/Rerender/ControlNet/docs/low_vram.md +0 -15
  36. spaces/AshutoshPattanayak/LangchainDemo/README.md +0 -12
  37. spaces/Aveygo/AstroSleuth/main.py +0 -154
  38. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/README.md +0 -7
  39. spaces/Benson/text-generation/Examples/Cara Descargar Tema Robot Ejrcito.md +0 -5
  40. spaces/Benson/text-generation/Examples/Carx Carretera Carreras Apk Hack Descargar.md +0 -90
  41. spaces/Benson/text-generation/Examples/Como Hacer Un Anillo De Amor.md +0 -105
  42. spaces/Benson/text-generation/Examples/Descarga Mp4 Alquimia De Las Almas Temporada 2.md +0 -103
  43. spaces/BernardoOlisan/vqganclip/taming-transformers/scripts/extract_submodel.py +0 -17
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/__init__.py +0 -0
  45. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/adjacent_difference.h +0 -44
  46. spaces/CVPR/WALT/mmdet/core/__init__.py +0 -7
  47. spaces/CVPR/WALT/mmdet/models/roi_heads/bbox_heads/sabl_head.py +0 -572
  48. spaces/Carlosito16/HXM-summarization/app.py +0 -321
  49. spaces/ChandraMohanNayal/AutoGPT/tests/smoke_test.py +0 -59
  50. spaces/CikeyQI/meme-api/meme_generator/memes/distracted/__init__.py +0 -23
spaces/0x90e/ESRGAN-MANGA/ESRGANer.py DELETED
@@ -1,156 +0,0 @@
- from PIL import Image, ImageChops
- import numpy as np
- import cv2 as cv
- import math
- import torch
- from torch.nn import functional as F
-
- """
- Borrowed and adapted from https://github.com/xinntao/Real-ESRGAN/blob/master/realesrgan/utils.py
- Thank you xinntao!
- """
- class ESRGANer():
-     """A helper class for upsampling images with ESRGAN.
-
-     Args:
-         scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4.
-         model (nn.Module): The defined network. Default: None.
-         tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop
-             input images into tiles, and then process each of them. Finally, they will be merged into one image.
-             0 denotes for do not use tile. Default: 500.
-         tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10.
-         pre_pad (int): Pad the input images to avoid border artifacts. Default: 10.
-     """
-
-     def __init__(self,
-                  scale=4,
-                  model=None,
-                  tile=300,
-                  tile_pad=10,
-                  pre_pad=10
-                  ):
-         self.scale = scale
-         self.tile_size = tile
-         self.tile_pad = tile_pad
-         self.pre_pad = pre_pad
-         self.mod_scale = None
-
-         self.model = model
-
-     def pre_process(self, img):
-         """Pre-process, such as pre-pad and mod pad, so that the images can be divisible
-         """
-         self.img = img
-
-         # pre_pad
-         if self.pre_pad != 0:
-             self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect')
-         # mod pad for divisible borders
-         if self.scale == 2:
-             self.mod_scale = 2
-         elif self.scale == 1:
-             self.mod_scale = 4
-         if self.mod_scale is not None:
-             self.mod_pad_h, self.mod_pad_w = 0, 0
-             _, _, h, w = self.img.size()
-             if (h % self.mod_scale != 0):
-                 self.mod_pad_h = (self.mod_scale - h % self.mod_scale)
-             if (w % self.mod_scale != 0):
-                 self.mod_pad_w = (self.mod_scale - w % self.mod_scale)
-             self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect')
-
-     def process(self):
-         # model inference
-         self.output = self.model(self.img)
-
-     def tile_process(self):
-         """It will first crop input images to tiles, and then process each tile.
-         Finally, all the processed tiles are merged into one images.
-
-         Modified from: https://github.com/ata4/esrgan-launcher
-         """
-         batch, channel, height, width = self.img.shape
-         output_height = height * self.scale
-         output_width = width * self.scale
-         output_shape = (batch, channel, output_height, output_width)
-
-         # start with black image
-         self.output = self.img.new_zeros(output_shape)
-         tiles_x = math.ceil(width / self.tile_size)
-         tiles_y = math.ceil(height / self.tile_size)
-
-         print("Image processing started...")
-
-         # loop over all tiles
-         for y in range(tiles_y):
-             for x in range(tiles_x):
-                 # extract tile from input image
-                 ofs_x = x * self.tile_size
-                 ofs_y = y * self.tile_size
-                 # input tile area on total image
-                 input_start_x = ofs_x
-                 input_end_x = min(ofs_x + self.tile_size, width)
-                 input_start_y = ofs_y
-                 input_end_y = min(ofs_y + self.tile_size, height)
-
-                 # input tile area on total image with padding
-                 input_start_x_pad = max(input_start_x - self.tile_pad, 0)
-                 input_end_x_pad = min(input_end_x + self.tile_pad, width)
-                 input_start_y_pad = max(input_start_y - self.tile_pad, 0)
-                 input_end_y_pad = min(input_end_y + self.tile_pad, height)
-
-                 # input tile dimensions
-                 input_tile_width = input_end_x - input_start_x
-                 input_tile_height = input_end_y - input_start_y
-                 tile_idx = y * tiles_x + x + 1
-                 input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]
-
-                 # upscale tile
-                 try:
-                     with torch.no_grad():
-                         output_tile = self.model(input_tile)
-                 except RuntimeError as error:
-                     print('Error', error)
-                 print(f'Processing tile {tile_idx}/{tiles_x * tiles_y}')
-
-                 # output tile area on total image
-                 output_start_x = input_start_x * self.scale
-                 output_end_x = input_end_x * self.scale
-                 output_start_y = input_start_y * self.scale
-                 output_end_y = input_end_y * self.scale
-
-                 # output tile area without padding
-                 output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
-                 output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
-                 output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
-                 output_end_y_tile = output_start_y_tile + input_tile_height * self.scale
-
-                 # put tile into output image
-                 self.output[:, :, output_start_y:output_end_y,
-                             output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
-                                                                        output_start_x_tile:output_end_x_tile]
-
-         print('All tiles processed, saving output image!')
-
-     def post_process(self):
-         # remove extra pad
-         if self.mod_scale is not None:
-             _, _, h, w = self.output.size()
-             self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale]
-         # remove prepad
-         if self.pre_pad != 0:
-             _, _, h, w = self.output.size()
-             self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale]
-         return self.output
-
-     @torch.no_grad()
-     def enhance(self, img):
-         self.pre_process(img)
-
-         if self.tile_size > 0:
-             self.tile_process()
-         else:
-             self.process()
-         output_img = self.post_process()
-
-         return output_img
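
For reference, a minimal driver for the helper deleted above — a sketch, not part of the original Space: it assumes an ESRGAN-style `nn.Module` has already been loaded as `model`, and that inputs are NCHW float tensors in [0, 1], which is the layout `pre_process` expects from its `F.pad` and `.size()` calls; the file paths are placeholders.

```python
# Hypothetical usage of the deleted ESRGANer helper (assumptions noted above).
import numpy as np
import torch
from PIL import Image

img = Image.open("input.png").convert("RGB")  # placeholder input path

# HWC uint8 -> NCHW float in [0, 1], the layout pre_process() pads and sizes
tensor = torch.from_numpy(np.array(img)).permute(2, 0, 1).float().div(255.0).unsqueeze(0)

upsampler = ESRGANer(scale=4, model=model, tile=300)  # `model`: assumed pre-loaded network
output = upsampler.enhance(tensor)                    # pads, tiles, upscales, then unpads

# NCHW float -> HWC uint8 for saving
arr = output.squeeze(0).clamp(0, 1).mul(255).byte().permute(1, 2, 0).cpu().numpy()
Image.fromarray(arr).save("output.png")               # placeholder output path
```

One caveat visible in the code above: `tile_process` only guards the forward pass with try/except, so an out-of-memory tile leaves `output_tile` undefined and the subsequent paste raises a `NameError`; shrinking `tile` is the practical workaround.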
spaces/123aa/pastel-mix/app.py DELETED
@@ -1,137 +0,0 @@
- from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
- import gradio as gr
- import torch
- from PIL import Image
-
- model_id = 'andite/pastel-mix'
- prefix = ''
-
- scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
-
- pipe = StableDiffusionPipeline.from_pretrained(
-     model_id,
-     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-     scheduler=scheduler)
-
- pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
-     model_id,
-     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-     scheduler=scheduler)
-
- if torch.cuda.is_available():
-     pipe = pipe.to("cuda")
-     pipe_i2i = pipe_i2i.to("cuda")
-
- def error_str(error, title="Error"):
-     return f"""#### {title}
- {error}""" if error else ""
-
- def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
-
-     generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
-     prompt = f"{prefix} {prompt}" if auto_prefix else prompt
-
-     try:
-         if img is not None:
-             return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
-         else:
-             return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
-     except Exception as e:
-         return None, error_str(e)
-
- def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
-
-     result = pipe(
-         prompt,
-         negative_prompt = neg_prompt,
-         num_inference_steps = int(steps),
-         guidance_scale = guidance,
-         width = width,
-         height = height,
-         generator = generator)
-
-     return result.images[0]
-
- def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
-
-     ratio = min(height / img.height, width / img.width)
-     img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
-     result = pipe_i2i(
-         prompt,
-         negative_prompt = neg_prompt,
-         init_image = img,
-         num_inference_steps = int(steps),
-         strength = strength,
-         guidance_scale = guidance,
-         width = width,
-         height = height,
-         generator = generator)
-
-     return result.images[0]
-
- css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
- """
- with gr.Blocks(css=css) as demo:
-     gr.HTML(
-         f"""
- <div class="main-div">
- <div>
- <h1>Pastel Mix</h1>
- </div>
- <p>
- Demo for <a href="https://huggingface.co/andite/pastel-mix">Pastel Mix</a> Stable Diffusion model.<br>
- {"Add the following tokens to your prompts for the model to work properly: <b>prefix</b>" if prefix else ""}
- </p>
- Running on {"<b>GPU 🔥</b>" if torch.cuda.is_available() else f"<b>CPU 🥶</b>. For faster inference it is recommended to <b>upgrade to GPU in <a href='https://huggingface.co/spaces/akhaliq/pastel-mix/settings'>Settings</a></b>"} after duplicating the space<br><br>
- <a style="display:inline-block" href="https://huggingface.co/spaces/akhaliq/pastel-mix?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
- </div>
- """
-     )
-     with gr.Row():
-
-         with gr.Column(scale=55):
-             with gr.Group():
-                 with gr.Row():
-                     prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False)
-                     generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
-
-                 image_out = gr.Image(height=512)
-                 error_output = gr.Markdown()
-
-         with gr.Column(scale=45):
-             with gr.Tab("Options"):
-                 with gr.Group():
-                     neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
-                     auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=prefix, visible=prefix)
-
-                     with gr.Row():
-                         guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
-                         steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
-
-                     with gr.Row():
-                         width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
-                         height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
-
-                     seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
-
-             with gr.Tab("Image to image"):
-                 with gr.Group():
-                     image = gr.Image(label="Image", height=256, tool="editor", type="pil")
-                     strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
-
-     auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False)
-
-     inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix]
-     outputs = [image_out, error_output]
-     prompt.submit(inference, inputs=inputs, outputs=outputs)
-     generate.click(inference, inputs=inputs, outputs=outputs)
-
-     gr.HTML("""
- <div style="border-top: 1px solid #303030;">
- <br>
- <p>This space was created using <a href="https://huggingface.co/spaces/anzorq/sd-space-creator">SD Space Creator</a>.</p>
- </div>
- """)
-
- demo.queue(concurrency_count=1)
- demo.launch()
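
As a side note, the deleted app's pipelines can be exercised without the Gradio UI. Below is a minimal headless sketch of its txt2img path; it assumes the `andite/pastel-mix` weights are still downloadable from the Hub, and the prompt strings are invented for illustration. The `init_image=` keyword in `img_to_img` above also dates the file to an older diffusers API (newer releases call that argument `image`).

```python
# Headless sketch of the deleted Space's txt2img path (assumptions noted above).
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

model_id = "andite/pastel-mix"  # checkpoint the deleted app loads
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
# Same scheduler family the app configures at startup
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

generator = torch.Generator(device).manual_seed(42)  # fixed seed for reproducibility
image = pipe(
    "a portrait in soft pastel colors",   # example prompt, not from the original file
    negative_prompt="lowres, bad anatomy",
    num_inference_steps=25,               # the app's default Steps slider value
    guidance_scale=7.5,                   # the app's default Guidance value
    width=512,
    height=512,
    generator=generator,
).images[0]
image.save("pastel_mix_sample.png")
```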
spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest] Features and Benefits.md DELETED
@@ -1,80 +0,0 @@
-
- <h1>ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest]: A Comprehensive Review</h1>
- <p>If you are looking for a powerful and versatile photo editing software that can handle all your creative needs, you might want to check out <strong>ACDSee Photo Studio Ultimate 2020</strong>. This software is not only a digital asset manager and a RAW editor with layers, but also a full-featured photo editor that offers a wide range of tools and features to help you create stunning images.</p>
- <h2>ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest]</h2><br /><p><b><b>Download Zip</b> &#10004;&#10004;&#10004; <a href="https://byltly.com/2uKvQl">https://byltly.com/2uKvQl</a></b></p><br /><br />
- <p>In this article, we will review <strong>ACDSee Photo Studio Ultimate 2020</strong> in detail and show you what it can do for you. We will cover its main features, benefits, drawbacks, and how to get it for free with a crack. By the end of this article, you will have a clear idea of whether <strong>ACDSee Photo Studio Ultimate 2020</strong> is the right software for you or not.</p>
- <h2>Introduction: What is ACDSee Photo Studio Ultimate 2020 and why you need it</h2>
- <p><strong>ACDSee Photo Studio Ultimate 2020</strong> is a software developed by <a href="https://www.acdsee.com/en/index/">ACD Systems</a>, a company that has been in the business of digital imaging since 1994. It is the latest version of their flagship product, which combines several functions into one package.</p>
- <p><strong>ACDSee Photo Studio Ultimate 2020</strong> is designed to answer your creative graphic and photography needs. It allows you to manage your photos from import to export, edit them with layers and filters, enhance them with adjustments and effects, organize them by faces and keywords, and share them online or offline.</p>
- <p><strong>ACDSee Photo Studio Ultimate 2020</strong> is suitable for both beginners and professionals who want a fast and flexible solution for their photo editing projects. It supports over 500 camera models and formats, including RAW files. It also works seamlessly with other software like Photoshop and Lightroom.</p>
- <p>In this article, we will explore <strong>ACDSee Photo Studio Ultimate 2020</strong>'s features in depth and show you how they can help you improve your workflow and creativity.</p>
- <h2>Features: What are the main features of ACDSee Photo Studio Ultimate 2020 and how they can help you with your photo editing needs</h2>
- <p><strong>ACDSee Photo Studio Ultimate 2020</strong> has many features that make it stand out from other photo editing software. Here are some of the most important ones:</p>
- <h3>Face Detection & Facial Recognition</h3>
- <p>One of the most impressive features of <strong>ACDSee Photo Studio Ultimate 2020</strong> is its face detection and facial recognition tool. This tool allows you to find and name the people in your photos automatically. You can also search your photos by unnamed, auto-named, or suggested names.</p>
- <p>ACDSee Photo Studio Ultimate 2020 full version offline installer<br />
- ACDSee Photo Studio Ultimate 2020 with facial recognition tool<br />
- ACDSee Photo Studio Ultimate 2020 free download with crack<br />
- ACDSee Photo Studio Ultimate 2020 color wheel feature<br />
- ACDSee Photo Studio Ultimate 2020 review and tutorial<br />
- ACDSee Photo Studio Ultimate 2020 vs Photoshop<br />
- ACDSee Photo Studio Ultimate 2020 system requirements and technical details<br />
- ACDSee Photo Studio Ultimate 2020 best price and discount<br />
- ACDSee Photo Studio Ultimate 2020 layered photo editor<br />
- ACDSee Photo Studio Ultimate 2020 RAW editor with layers<br />
- ACDSee Photo Studio Ultimate 2020 digital asset manager<br />
- ACDSee Photo Studio Ultimate 2020 non-destructive photo editing<br />
- ACDSee Photo Studio Ultimate 2020 develop mode and presets<br />
- ACDSee Photo Studio Ultimate 2020 import face data from Lightroom and Picasa<br />
- ACDSee Photo Studio Ultimate 2020 support for GoPro.GPR file format<br />
- How to install and activate ACDSee Photo Studio Ultimate 2020 with crack<br />
- How to use ACDSee Photo Studio Ultimate 2020 for creative graphic and image composition<br />
- How to optimize your workflow with ACDSee Photo Studio Ultimate 2020<br />
- How to create stunning photo manipulations with ACDSee Photo Studio Ultimate 2020<br />
- How to organize and manage your photos with ACDSee Photo Studio Ultimate 2020<br />
- How to enhance your photos with ACDSee Photo Studio Ultimate 2020 adjustment layers<br />
- How to apply filters and effects with ACDSee Photo Studio Ultimate 2020<br />
- How to edit RAW images with ACDSee Photo Studio Ultimate 2020<br />
- How to use the color wheel in ACDSee Photo Studio Ultimate 2020<br />
- How to find and name faces in your photos with ACDSee Photo Studio Ultimate 2020<br />
- How to compare ACDSee Photo Studio Ultimate 2020 with other photo editing software<br />
- How to get the latest updates and features of ACDSee Photo Studio Ultimate 2020<br />
- How to troubleshoot common issues with ACDSee Photo Studio Ultimate 2020<br />
- How to backup and restore your photos with ACDSee Photo Studio Ultimate 2020<br />
- How to share your photos online with ACDSee Photo Studio Ultimate 2020</p>
- <p>This feature is very useful for organizing your photos by person or family member. You can also create smart collections based on faces or use face data as metadata for sorting or filtering. You can also edit faces individually or in batches with tools like skin tune, red-eye removal, blemish removal, teeth whitening, etc.</p>
- <h3>HDR & Focus Stacking</h3>
- <p>If you want to create stunning images with high dynamic range and depth of field,<strong>ACDSee Photo Studio Ultimate 2020</strong>'s HDR and focus stacking tools are perfect for you. These tools allow you to combine multiple images with different exposures or focal distances into one image with maximum detail in shadows and highlights.</p>
- <p>The HDR tool lets you merge a series of images with different exposures into one image that captures the full range of light in your scene. You can also adjust parameters like brightness, contrast, saturation, tone curve, etc. to fine-tune your HDR image.</p>
- <p>The focus stacking tool lets you merge a series of images with different focal distances into one image that has a greater depth of field than a single exposure would allow. You can also adjust parameters like alignment, blending mode, radius, etc. to fine-tune your focus stacked image.</p>
- <h3>Layered Editing</h3>
- <p>If you want to create complex compositions and manipulations,<strong>ACDSee Photo Studio Ultimate 2020</strong>'s layered editing feature is ideal for you. This feature allows you to use layers, masks, filters, and effects to edit your images non-destructively.</p>
- <p>You can add as many layers as you want to your image and apply any edit mode filter or adjustment to each layer individually. You can also blend layers with different modes like normal, multiply, screen, overlay, etc. You can also use masks to control which parts of your image are affected by each layer.</p>
- <p>You can also use text layers to add captions, titles, or watermarks to your image. You can customize font, size, color, style, justification, opacity, and more for each text layer. You can also add effects like inner glow, shadows, bevel, outline, or blur to your text layers.</p>
- <h3>Digital Asset Management</h3>
- <p>If you want to manage, sort, tag, and search your photos efficiently,<strong>ACDSee Photo Studio Ultimate 2020</strong>'s digital asset management feature is essential for you. This feature allows you to import, export, browse, organize, and backup your photos easily.</p>
- ```html issues, learning curve, bugs, and updates.</i></p>
- <li><b>How to get ACDSee Photo Studio Ultimate 2020 for free with a crack?</b></li>
- <p><i>A: To get ACDSee Photo Studio Ultimate 2020 for free with a crack, you need to download the software from a reliable source and apply the crack file to activate the full version of the software. You can follow these steps:</i></p>
- <ol>
- <li><i>Download ACDSee Photo Studio Ultimate 2020 v13.0.1 Build 2023 Crack [Latest] from the link below.</i></li>
- <li><i>Extract the downloaded file with WinRAR or any other file extractor.</i></li>
- <li><i>Run the setup file and follow the installation instructions.</i></li>
- <li><i>Do not run the software after installation.</i></li>
- <li><i>Copy the crack file from the crack folder and paste it into the installation directory of the software.</i></li>
- <li><i>Run the software and enjoy the full version of ACDSee Photo Studio Ultimate 2020 for free.</i></li>
- </ol>
- <li><b>Is ACDSee Photo Studio Ultimate 2020 safe to use?</b></li>
- <p><i>A: ACDSee Photo Studio Ultimate 2020 is safe to use if you download it from a trusted source and scan it with a reputable antivirus program before installing it. However, using a cracked version of the software may pose some risks such as malware infection, data loss, legal issues, or performance issues. Therefore, we recommend that you use ACDSee Photo Studio Ultimate 2020 at your own risk and discretion.</i></p>
- <li><b>What are some alternatives to ACDSee Photo Studio Ultimate 2020?</b></li>
- <p><i>A: Some alternatives to ACDSee Photo Studio Ultimate 2020 are:</i></p>
- <ul>
- <li><i>Adobe Photoshop: A professional photo editing software that offers a comprehensive set of tools and features for creating and editing images, graphics, and designs.</i></li>
- <li><i>Adobe Lightroom: A professional photo editing software that offers a streamlined workflow for importing, organizing, editing, and sharing photos.</i></li>
- <li><i>GIMP: A free and open-source photo editing software that offers a similar set of tools and features as Photoshop.</i></li>
- </ul>
- </ol>
- <p>I hope you enjoyed this article and learned something new about <strong>ACDSee Photo Studio Ultimate 2020</strong>. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Stronghold Crusader Trainer v1.0.0.1 for Free and Dominate the Game.md DELETED
@@ -1,143 +0,0 @@
- <br />
- <h1>Stronghold Crusader Trainer V1.0.0.1: A Guide to Download and Use</h1>
- <p>If you are a fan of strategy games, you might have heard of <strong>Stronghold Crusader</strong>, a popular medieval-themed game that lets you build and defend your own castle against various enemies. The game offers a lot of challenges and fun, but it can also be quite difficult and frustrating at times.</p>
- <h2>stronghold crusader trainer v1.0.0.1 free download</h2><br /><p><b><b>Download File</b> &#9675;&#9675;&#9675; <a href="https://byltly.com/2uKzBP">https://byltly.com/2uKzBP</a></b></p><br /><br />
- <p>That's why some players use <strong>trainers</strong>, which are programs that modify the game's code and give you access to various cheats and hacks that can make the game easier or more enjoyable.</p>
- <p>In this article, we will show you how to download and use one of the best trainers for Stronghold Crusader, which is <strong>Stronghold Crusader Trainer V1.0.0.1</strong>. This trainer has many features and options that can help you conquer your enemies and build your dream castle.</p>
- <p>So, if you are interested in learning more about this trainer, keep reading!</p>
- <h2>What is Stronghold Crusader?</h2>
- <p>Stronghold Crusader is a real-time strategy game developed by Firefly Studios and released in 2002. It is a sequel to Stronghold, which was released in 2001.</p>
- <p>The game is set in the Middle East during the Crusades, where you can play as either a European lord or an Arabian sultan. You can choose from four historical campaigns, each with different missions and objectives.</p>
- <p>The game also has a skirmish mode, where you can play against up to seven computer-controlled opponents or other players online.</p>
- <p>The main goal of the game is to build a strong castle that can withstand attacks from your enemies, while also producing resources, recruiting troops, and expanding your territory.</p>
- <p>The game has many features that make it realistic and immersive, such as weather effects, day-night cycle, fire propagation, siege engines, historical characters, and different types of units.</p>
- <p>stronghold crusader hd trainer mrantifun<br />
- stronghold crusader plus 27 trainer deviance<br />
- stronghold crusader unlimited gold and resources cheat<br />
- stronghold crusader extreme cheat table<br />
- stronghold crusader trainer for steam version<br />
- stronghold crusader god mode trainer<br />
- stronghold crusader trainer setup.exe<br />
- stronghold crusader trainer with popularity cheat<br />
- stronghold crusader hd v1.0.1 trainer +2<br />
- stronghold crusader trainer for windows 10<br />
- stronghold crusader trainer for version 1.3<br />
- stronghold crusader hd and extreme latest version trainer<br />
- stronghold crusader trainer with invincible units<br />
- stronghold crusader trainer with instakill cheat<br />
- stronghold crusader hd (steam) 9-1-20 trainer +4<br />
- stronghold crusader trainer with unlimited chickens<br />
- stronghold crusader trainer with freeze time cheat<br />
- stronghold crusader trainer with stop increasing population cheat<br />
- stronghold crusader hd v1.0.0.1 free download full version<br />
- stronghold crusader hd v1.0.0.1 free download for pc<br />
- stronghold crusader hd v1.0.0.1 free download mega<br />
- stronghold crusader hd v1.0.0.1 free download torrent<br />
- stronghold crusader hd v1.0.0.1 free download crack<br />
- stronghold crusader hd v1.0.0.1 free download skidrow<br />
- stronghold crusader hd v1.0.0.1 free download ocean of games<br />
- how to install stronghold crusader hd v1.0.0.1 free download<br />
- how to use stronghold crusader hd v1.0.0.1 free download trainer<br />
- how to update stronghold crusader hd v1.0.0.1 free download<br />
- how to play multiplayer on stronghold crusader hd v1.0.0.1 free download<br />
- how to fix bugs on stronghold crusader hd v1.0.0.1 free download<br />
- best mods for stronghold crusader hd v1.0.0.1 free download<br />
- best maps for stronghold crusader hd v1.0.0.1 free download<br />
- best tips and tricks for stronghold crusader hd v1.0.0.1 free download<br />
- best cheats and hacks for stronghold crusader hd v1.0.0.1 free download<br />
- best strategies and guides for stronghold crusader hd v1.0.0.1 free download<br />
- review of stronghold crusader hd v1.0.0.1 free download<br />
- gameplay of stronghold crusader hd v1.0.0.1 free download<br />
- walkthrough of stronghold crusader hd v1.0.0.1 free download<br />
- comparison of stronghold crusader hd v1.0.0.1 free download and original game<br />
- comparison of stronghold crusader hd v1.0.0.1 free download and extreme edition</p>
- <h2>What is a trainer and why do you need it?</h2>
- <p>A trainer is a program that modifies the game's code and gives you access to various cheats and hacks that can alter the game's behavior.</p>
- <p>Some of the common cheats and hacks that trainers offer are:</p>
- <ul>
- <li>Unlimited resources: You can have unlimited amounts of food, wood, stone, iron, pitch, wheat, bread, cheese, meat, apples, beer, flour, bows, crossbows, spears, pikes, maces, swords, leather armor, metal armor, gold, etc.</li>
- <li>Unlimited population: You can have unlimited number of peasants and soldiers in your castle.</li>
- <li>Unlimited health: You can make your units invincible or heal them instantly.</li>
- <li>Stop time: You can pause or speed up the game's clock.</li>
- <li>Happy residents: You can make your peasants happy or unhappy.</li>
- <li>God mode: You can make your units have superpowers or abilities.</li>
- </ul>
- <p>You might need a trainer for various reasons:</p>
- <ul>
- <li>You want to have more fun and experiment with different strategies.</li>
- <li>You want to overcome a difficult level or mission.</li>
- <li>You want to test your skills against stronger opponents.</li>
- <li>You want to cheat or troll other players online.</li>
- </ul>
- <h2>How to download Stronghold Crusader Trainer V1.0.0.1?</h2>
- <p>If you want to download Stronghold Crusader Trainer V1.0.0.1, you need to follow these steps:</p>
- <ol>
- <li>Go to <a href="https://mrantifun.net/threads/stronghold-crusader-hd-trainer.9293/">this link</a>, which is one of the reliable sources for trainers on the internet.</li>
- <li>Scroll down until you see the attachment section with two files: <code>Stronghold Crusader HD V1.0.1 Trainer +2 MrAntiFun.zip</code> and <code>Stronghold Crusader HD (Steam) Trainer Setup.exe</code>.</li>
- <li>If you have the Steam version of the game, download the second file; otherwise download the first file.</li>
- <li>Save the file on your computer in a folder where you can easily find it later.</li>
- </ol>
- <h2>How to install and run Stronghold Crusader Trainer V1.0.0.1?</h2>
- <p>If you have downloaded Stronghold Crusader Trainer V1.0.0.1 successfully ```html <h2>How to install and run Stronghold Crusader Trainer V1.0.0.1?</h2>
- <p>If you have downloaded Stronghold Crusader Trainer V1.0.0.1 successfully, you need to follow these steps:</p>
- <ol>
- <li>If you have downloaded the zip file, extract it using a program like WinRAR or 7-Zip.</li>
- <li>If you have downloaded the exe file, run it and follow the instructions.</li>
- <li>Copy the trainer file (Crusader10Trn.exe) to the folder where you have installed Stronghold Crusader.</li>
- <li>Run the trainer file before you run the game.</li>
- <li>You should see a small window with the trainer's options and hotkeys.</li>
- <li>Run the game and enjoy the cheats!</li>
- </ol>
- <h2>What are the features and options of Stronghold Crusader Trainer V1.0.0.1?</h2>
- <p>Stronghold Crusader Trainer V1.0.0.1 has many features and options that can make your game easier or more fun. Here is a list and description of them:</p>
- <table>
- <tr><th>Feature</th><th>Description</th><th>Hotkey</th></tr>
- <tr><td>Unlimited resources</td><td>You can have unlimited amounts of food, wood, stone, iron, pitch, wheat, bread, cheese, meat, apples, beer, flour, bows, crossbows, spears, pikes, maces, swords, leather armor, metal armor, gold, etc.</td><td>Q - Y</td></tr>
- <tr><td>Unlimited population</td><td>You can have unlimited number of peasants and soldiers in your castle.</td><td>F6</td></tr>
- <tr><td>Unlimited health</td><td>You can make your units invincible or heal them instantly.</td><td>F7</td></tr>
- <tr><td>Stop time</td><td>You can pause or speed up the game's clock.</td><td>M</td></tr>
- <tr><td>Happy residents</td><td>You can make your peasants happy or unhappy.</td><td>N</td></tr>
- <tr><td>God mode</td><td>You can make your units have superpowers or abilities.</td><td>F8</td></tr>
- <tr><td>Power bar to the max</td><td>You can fill up your power bar to use special abilities in Stronghold Crusader Extreme.</td><td>F9</td></tr>
- </table>
- <h2>How to use Stronghold Crusader Trainer V1.0.0.1 effectively?</h2>
- <p>If you want to use Stronghold Crusader Trainer V1.0.0.1 effectively, you need to follow these tips and tricks:</p>
- <ul>
- <li>Use the cheats wisely and sparingly. Don't abuse them or you might ruin the game's balance and challenge.</li>
- <li>Don't use the cheats online or you might get banned or reported by other players.</li>
- <li>Don't use the cheats on missions that require specific objectives or conditions. You might fail them or cause glitches.</li>
- <li>Don't use the cheats on missions that have scripted events or cutscenes. You might miss them or cause errors.</li>
- <li>Don't use the cheats on missions that have timers or countdowns. You might mess them up or cause crashes.</li>
- <li>Don't use the cheats on missions that have enemy reinforcements or invasions. You might prevent them from happening or cause bugs.</li>
- <li>Don't use the cheats on missions that have allies or neutral parties. You might affect their behavior or status.</li>
- <li>Don't use the cheats on missions that have historical accuracy or realism. You might break the immersion or authenticity.</li>
- <li>Don't use the cheats on missions that have difficulty levels or rankings. You might lower your score or rank.</li>
- <li>Don't use the cheats on missions that have achievements or trophies. You might disable them or cheat yourself.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In conclusion, Stronghold Crusader Trainer V1.0.0.1 is a great program that can enhance your gaming experience with Stronghold Crusader. It can give you access to various cheats and hacks that can help you overcome challenges and have more fun.</p>
- <p>If you want to download and use this trainer, you need to follow our guide carefully and make sure you get it from a reliable source. You also need to be careful and responsible when using it and avoid any problems or issues that might arise.</p>
- <p>We hope you found this article helpful and informative. If you did, please share it with your friends and fellow gamers who might be interested in this trainer as well.</p>
- <p>Thank you for reading and happy gaming!</p>
- <h3>Frequently Asked Questions</h3>
- <ol>
- <li><strong>Does this trainer work with other versions of Stronghold Crusader?</strong></li>
- <p>No, this trainer only works with version 1.0 of Stronghold Crusader (English). If you have a different version of the game, you need to find a different trainer that is compatible with it.</p>
- <li><strong>Does this trainer work with Stronghold Crusader HD or Extreme?</strong></li>
- <p>No, this trainer only works with Stronghold Crusader (original). If you have Stronghold Crusader HD or Extreme, you need to find a different trainer that is compatible with them.</p>
- <li><strong>Does this trainer work with mods or custom maps?</strong></li>
- <p>No, this trainer only works with the vanilla game (original). If you have any mods or custom maps installed, you need to disable them before using this trainer.</p>
- <li><strong>Is this trainer safe and virus-free?</strong></li>
- <p>We cannot guarantee that this trainer is 100% safe and virus-free, as we did not create it ourselves. However, we did scan it with several antivirus programs and found no threats or malware in it. Use it at your own risk and discretion.</p>
- <li><strong>Where can I get more trainers for Stronghold Crusader?</strong></li>
- <p>If you want more trainers for Stronghold Crusader ```html <p>If you want more trainers for Stronghold Crusader, you can check out these sources:</p>
- <ul>
- <li><a href="https://www.gamepressure.com/download.asp?ID=41945">Stronghold: Crusader - v1.2 +19 Trainer</a>: This trainer has 19 options, such as unlimited resources, unlimited health, unlimited gold, and more.</li>
- <li><a href="https://www.wemod.com/cheats/stronghold-crusader-hd-trainers">Stronghold Crusader HD Cheats and Trainers for PC - WeMod</a>: This trainer has 4 options, such as unlimited population, unlimited health, unlimited resources, and unlimited gold.</li>
- <li><a href="https://www.youtube.com/watch?v=dssejr53GDg">Stronghold Crusader Updated Trainer V1.41 (Update - I) - YouTube</a>: This trainer has 8 options, such as unlimited resources, unlimited population, stop time, and god mode.</li>
- <li><a href="https://megagames.com/trainers/stronghold-crusader-0">Stronghold: Crusader (+27 Trainer) | MegaGames</a>: This trainer has 27 options, such as unlimited resources, unlimited population, unlimited health, stop time, happy residents, and god mode.</li>
- <li><a href="https://github.com/dfsoeten/StrongholdCrusaderTrainer">Stronghold Crusader Trainer - GitHub</a>: This trainer has 21 options, such as gold, chickens/skips, wood, stone, iron, pitch, bread, cheese, apples, meat, hops, ale, wheat, bows, crossbows, spears, pikes, maces, swords, leather armor, and iron armor.</li>
- </ul>
- <h3></h3></p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/8211759.9137094 Gt E2250 Flash Loader 7.5.4 Csc V0.4 Lite.md DELETED
@@ -1,8 +0,0 @@
- <h2>8211759.9137094 Gt E2250 Flash Loader 7.5.4 Csc V0.4 Lite</h2><br /><p><b><b>DOWNLOAD</b> &ndash;&ndash;&ndash; <a href="https://imgfil.com/2uxZKR">https://imgfil.com/2uxZKR</a></b></p><br /><br />
-
- 8211759.9137094 Gt E2250 Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4
- Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader
- 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 Csc V0.4 Lite Flash Loader 7.5.4 8a78ff9644<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Atoll Rf Planning Tool Cracked __TOP__.md DELETED
@@ -1,24 +0,0 @@
- <h2>Atoll Rf Planning Tool Cracked</h2><br /><p><b><b>Download File</b> &#9913; <a href="https://imgfil.com/2uxZ7N">https://imgfil.com/2uxZ7N</a></b></p><br /><br />
- <br />
- die, Jr. is on april 5, 1967. other key dates include april 1, 1975, in which a new building permit is issued, and october 1, 1978, when the mia zina rf sound system is operating. office hours for building, engineering and design services are 8 a.m. to 4:30 p.m., thursday, friday and saturday. phone (702) 224-6801. _______________________________________________
-
- NEW NAME FOR OFFICE IN VILLAGE LIGHT PROPERTIES
-
- email to: [email protected]
-
- 7/5/01
-
- Village Light Properties has a new name.
-
- from the desk of Dr. Linda Anderson
-
- Let’s just call her Linda, shall we?
-
- At a recent office staff meeting the Chief Executive Officer of the newly named non-profit Community Living Center of West Miami announced that the BISCUIT HOUSE, the non-profit facility which provides needed services and affordable housing for the homeless for over 25 years, will henceforth be known as the VILLAGE LIGHT PROPERTIES CERTIFIED INNOVATIVE RESIDENCE. For those unfamiliar with Village Light Properties, it is a non-profit Florida corporation that for over two decades has designed and built a series of elegant, state-of-the-art, affordable housing projects which incorporate sustainable practices into their design, construction and operation. Village Light Properties is dedicated to the idea of creating safe, affordable homes for homeless and at-risk residents of Miami-Dade County who have traditionally been ignored by the private and public housing sectors.
-
- Village Light Properties is a pioneer in the production of rental housing in the Suncoast. Founded in 1984, Village Light Properties has established a reputation for attracting innovative private capital and utilizing the highest quality design in the industry. Villages are consistently recognized as the premier destination for innovative housing in the Suncoast. The unique model Village Light Properties has developed has enabled them to move with the times and continue to provide housing for at-risk, homeless and special needs residents while simultaneously offering affordable housing at below market rents.
-
- In 2000, an assessment of our financial status identified that we were in dire need of an infusion of capital. Our deteriorating financial status was due to the poor quality of our financing and the fact that we were a non-profit, or at least a non-profit with not as many advantages as a for- 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/CES Edupack 2013.rar.md DELETED
@@ -1,31 +0,0 @@
- <br />
- <h1>How to Download and Install CES EduPack 2013</h1>
- <p>CES EduPack is a software tool that helps students learn about materials and engineering design. It provides a comprehensive database of materials properties, interactive charts and graphs, case studies and exercises, and a range of teaching resources. CES EduPack is used by over 1000 universities and colleges worldwide to support courses in engineering, materials science, design, manufacturing, sustainability, and more.</p>
- <h2>CES edupack 2013.rar</h2><br /><p><b><b>Download Zip</b> &#10037; <a href="https://imgfil.com/2uxZ16">https://imgfil.com/2uxZ16</a></b></p><br /><br />
- <p>In this article, we will show you how to download and install CES EduPack 2013 on your computer. CES EduPack 2013 is an older version of the software that is no longer supported by Ansys, the company that develops and distributes it. However, some instructors may still prefer to use this version for their courses. If you are looking for the latest version of CES EduPack, please visit <a href="https://www.ansys.com/products/materials/granta-edupack">Ansys Granta EduPack</a>.</p>
- <h2>Step 1: Download CES EduPack 2013</h2>
- <p>The first step is to download the CES EduPack 2013 installation file from a reliable source. One such source is <a href="https://www.4shared.com/rar/SeYrqAejce/CES_EDUPACK_2013.html?locale=en">4shared</a>, a file sharing service that hosts various files uploaded by users. To download CES EduPack 2013 from 4shared, follow these steps:</p>
- <ul>
- <li>Go to <a href="https://www.4shared.com/rar/SeYrqAejce/CES_EDUPACK_2013.html?locale=en">this link</a>.</li>
- <li>Click on the green "Download" button.</li>
- <li>Wait for the countdown to finish and click on "Download file now".</li>
- <li>Save the file to your desired location on your computer. The file name is CES EDUPACK 2013.rar and the size is 1.35 GB.</li>
- </ul>
- <h2>Step 2: Extract CES EduPack 2013</h2>
- <p>The second step is to extract the CES EduPack 2013 installation file from the compressed archive. A compressed archive is a file that contains one or more files that are reduced in size to save space and bandwidth. To extract CES EduPack 2013 from the compressed archive, you will need a software program that can handle .rar files, such as WinRAR or 7-Zip. To extract CES EduPack 2013 using WinRAR, follow these steps:</p>
- <ul>
- <li>Right-click on the CES EDUPACK 2013.rar file and select "Extract Here".</li>
- <li>Wait for the extraction process to finish. You should see a folder named CES EDUPACK 2013 with several files inside.</li>
- </ul>
- <h2>Step 3: Install CES EduPack 2013</h2>
- <p>The third and final step is to install CES EduPack 2013 on your computer. To install CES EduPack 2013, follow these steps:</p>
- <p></p>
- <ul>
- <li>Open the CES EDUPACK 2013 folder and double-click on the setup.exe file.</li>
- <li>Follow the instructions on the screen to complete the installation process.</li>
- <li>You may need to enter a license key or activation code to activate the software. If you do not have one, you can request one from your instructor or from Ansys by filling out <a href="https://www.grantadesign.com/education/edupack/request-license.htm">this form</a>.</li>
- <li>Once the installation is finished, you can launch CES EduPack 2013 from your desktop or start menu.</li>
- </ul>
- <p>Congratulations! You have successfully downloaded and installed CES EduPack 2013 on your computer. You can now use it to explore materials and engineering design concepts in your courses.</p> d5da3c52bf<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 MOD APK Download - Get Unlimited Money and Unlock All Cars.md DELETED
@@ -1,199 +0,0 @@
1
-
2
- <h1>Download Beach Buggy Racing 2 Mod APK Revdl</h1>
3
- <p>Do you love kart racing games? Do you want to experience a thrilling and colorful adventure on a tropical island? Do you want to unlock and upgrade dozens of cars and power-ups? If you answered yes to any of these questions, then you should download Beach Buggy Racing 2 Mod APK Revdl. This is a modded version of the popular racing game Beach Buggy Racing 2 that gives you unlimited money, unlocked cars, power-ups, and more. In this article, we will tell you everything you need to know about this amazing game, how to download it from Revdl, how to play it, what are the best cars and power-ups, and what are some reviews and ratings from other players. So buckle up and get ready for some fun!</p>
4
- <h2>download beach buggy racing 2 mod apk revdl</h2><br /><p><b><b>Download Zip</b> &rarr; <a href="https://jinyurl.com/2uNJQY">https://jinyurl.com/2uNJQY</a></b></p><br /><br />
5
- <h2>What is Beach Buggy Racing 2?</h2>
6
- <p>Beach Buggy Racing 2 is a 3D kart racing game developed by Vector Unit. It is the sequel to Beach Buggy Racing, which was released in 2014. The game features a variety of tracks, cars, characters, power-ups, and game modes. You can race against other players online or offline, explore a mysterious island full of secrets and surprises, compete in championships and tournaments, or create your own custom races with your own rules. The game has stunning graphics, realistic physics, catchy music, and a lighthearted atmosphere. It is suitable for all ages and skill levels.</p>
7
- <h3>Why download Beach Buggy Racing 2 Mod APK Revdl?</h3>
8
- <p>Beach Buggy Racing 2 is free to play on Android devices. However, it also contains in-app purchases that require real money. These include coins, gems, tickets, cars, power-ups, and more by winning races and ranking high in the leaderboards. You can also use tickets to enter special events that offer exclusive rewards. Championship Mode is updated regularly with new tournaments and challenges.</p>
9
- <h3>Race Mode</h3>
10
- <p>Race Mode is the classic mode where you race against other players or AI opponents on different tracks. You can choose from various settings, such as the number of laps, the difficulty level, the power-up deck, and the car type. You can also invite your friends to join you in a private race or join a public race with random players. Race Mode is a great way to test your skills and have fun with others.</p>
11
- <h3>Drift Attack Mode</h3>
12
- <p>Drift Attack Mode is the skill-based mode where you perform drifts and powerslides to earn points and bonuses. You can choose from different tracks and cars that suit your drifting style. You can also use power-ups to boost your speed, score, or time. Drift Attack Mode is a challenging and rewarding mode that requires precision and timing.</p>
13
- <h3>Custom Mode</h3>
14
- <p>Custom Mode is the creative mode where you can customize and save your own race rules and power-up decks. You can mix and match different settings, such as the track, the car, the power-ups, the laps, the difficulty, and more. You can also name and share your custom races with other players or play them yourself. Custom Mode is a fun and unique mode that lets you create your own racing experience.</p>
15
- <h2>What are the best cars and power-ups in Beach Buggy Racing 2?</h2>
16
- <p>Beach Buggy Racing 2 has a lot of cars and power-ups to choose from, each with their own stats, abilities, and effects. Some of them are better than others, depending on your preference and strategy. Here are some of the best cars and power-ups in Beach Buggy Racing 2:</p>
17
- <h3>Cars</h3>
18
- <p>The following table shows the name, image, type, speed, acceleration, handling, and special ability of each car in the game:</p>
19
- <table>
20
- <tr>
21
- <th>Name</th>
22
- <th>Image</th>
23
- <th>Type</th>
24
- <th>Speed</th>
25
- <th>Acceleration</th>
26
- <th>Handling</th>
27
- <th>Special Ability</th>
28
- </tr>
29
- <tr>
30
- <td>Lambini</td>
31
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-1.jpg" alt="Lambini"></td>
32
- <td>Sport</td>
33
- <td>5/5</td>
34
- <td>4/5</td>
35
- <td>4/5</td>
36
- <td>Nitro Boost: Increases speed for a short time.</td>
37
- </tr>
38
- <tr>
39
- <td>Baja Bandito</td>
40
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-2.jpg" alt="Baja Bandito"></td>
41
- <td>Buggy</td>
42
- <td>4/5</td>
43
- <td>4/5</td>
44
- <td>5/5</td>
45
- <td>Baja Blast: Launches a shockwave that knocks back nearby opponents.</td>
46
- </tr>
47
- <tr>
48
- <td>Rocket Boat</td>
49
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-3.jpg" alt="Rocket Boat"></td>
50
- <td>Boat</td>
51
- <td>4/5</td>
52
- <td>5/5</td>
53
- <td>3/5</td>
54
- <td>Rocket Boost: Fires a rocket that propels the car forward.</td>
55
- </tr>
56
- <tr>
57
- <td>Sandstorm GT</td>
58
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-4.jpg" alt="Sandstorm GT"></td>
59
- <td>Muscle</td>
60
- <td>5/5</td>
61
- <td>3/5</td <td>4/5</td>
62
- <td>Sandstorm: Creates a sandstorm that obscures the vision of opponents behind.</td>
63
- </tr>
64
- <tr>
65
- <td>Lightning GT</td>
66
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-5.jpg" alt="Lightning GT"></td>
67
- <td>Electric</td>
68
- <td>4/5</td>
69
- <td>4/5</td>
70
- <td>4/5</td>
71
- <td>Lightning Strike: Zaps nearby opponents with a bolt of electricity.</td>
72
- </tr>
73
- <tr>
74
- <td>Monster Bus</td>
75
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-6.jpg" alt="Monster Bus"></td>
76
- <td>Monster</td>
77
- <td>3/5</td>
78
- <td>3/5</td>
79
- <td>3/5</td>
80
- <td>Monster Crush: Crushes opponents under its huge wheels.</td>
81
- </tr>
82
- <tr>
83
- <td>Firework Truck</td>
84
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-7.jpg" alt="Firework Truck"></td>
85
- <td>Truck</td>
86
- <td>3/5</td <td>Offensive</td>
87
- <td>Increases the speed of the car for a short time.</td>
88
- <td>1: Small boost.<br>2: Medium boost.<br>3: Large boost.</td>
89
- </tr>
90
- <tr>
91
- <td>Spring Trap</td>
92
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-19.jpg" alt="Spring Trap"></td>
93
- <td>Defensive</td>
94
- <td>Drops a spring trap behind the car that launches opponents into the air.</td>
95
- <td>1: Single spring.<br>2: Double spring.<br>3: Triple spring.</td>
96
- </tr>
97
- <tr>
98
- <td>Lightning Zap</td>
99
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-20.jpg" alt="Lightning Zap"></td>
100
- <td>Offensive</td>
101
- <td>Zaps the opponent in front of the car with a bolt of lightning that damages and slows them down.</td>
102
- <td>1: Single zap.<br>2: Double zap.<br>3: Triple zap.</td>
103
- </tr>
104
- <tr>
105
- <td>Mine Drop</td>
106
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-21.jpg" alt="Mine Drop"></td>
107
- <td>Defensive</td> <td>Drops a mine behind the car that explodes when an opponent touches it.</td> <td>1: Single mine.<br>2: Double mine.<br>3: Triple mine.</td> </tr>
108
- <tr>
109
- <td>Banana Peel</td> <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-22.jpg" alt="Banana Peel"></td> <td>Defensive</td> <td>Drops a banana peel behind the car that makes opponents slip and spin out.</td> <td>1: Single peel.<br>2: Double peel.<br>3: Triple peel.</td> </tr>
110
- <tr>
111
- <td>Frost Bite</td> <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-23.jpg" alt="Frost Bite"></td> <td>Offensive</td>
112
- <td>Freezes the opponent in front of the car with a blast of ice that damages and stops them.</td>
113
- <td>1: Single freeze.<br>2: Double freeze.<br>3: Triple freeze.</td>
114
- </tr>
115
- <tr>
116
- <td>Magnet</td>
117
- <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-24.jpg" alt="Magnet"></td>
118
- <td>Defensive</td> <td>Attracts coins and power-ups to the car.</td> <td>1: Short duration.<br>2: Medium duration.<br>3: Long duration.</td> </tr>
119
- <tr>
120
- <td>Firework</td> <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-25.jpg" alt="Firework"></td> <td>Offensive</td> <td>Fires a firework that flies in a random direction and explodes on impact.</td> <td>1: Single firework.<br>2: Double firework.<br>3: Triple firework.</td> </tr>
121
- <tr>
122
- <td>Coin Shield</td> <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-26.jpg" alt="Coin Shield"></td> <td>Defensive</td> <td>Creates a shield of coins around the car that protects it from damage and power-ups.</td> <td>1: Small shield.<br>2: Medium shield.<br>3: Large shield.</td> </tr>
123
- <tr>
124
- <td>Bomb</td> <td><img src="https://revdl.net/wp-content/uploads/2019/12/Beach-Buggy-Racing-2-27.jpg" alt="Bomb"></td> <td>Offensive</td>
125
- <td>Throws a bomb that explodes after a few seconds and damages nearby opponents.</td>
126
- <td>1: Small bomb.<br>2: Medium bomb.<br>3: Large bomb.</td>
127
- </tr>
128
- </table>
129
- <p>As you can see, there are many power-ups to choose from, each with their own effects and upgrades. You can try them all and find the ones that suit your style and strategy. You can also mix and match them to create your own power-up deck.</p>
130
166
- <h2>What are some reviews and ratings of Beach Buggy Racing 2?</h2>
167
- <p>Beach Buggy Racing 2 is a highly rated and well-reviewed game by players and critics alike. It has over 50 million downloads and 4.4 stars out of 5 on Google Play Store. It also has positive feedback on other platforms, such as App Store, Amazon, and Steam. Here are some of the reviews and ratings of Beach Buggy Racing 2:</p>
168
- <h3>Positive Reviews</h3>
169
- <p>Here are some of the quotes from players and critics who have praised the game for its graphics, gameplay, variety, and fun factor:</p>
170
- <ul>
171
- <li>"This game is awesome! The graphics are amazing, the gameplay is smooth, the variety of cars and power-ups is great, and the fun factor is high. I love racing against other players online and offline, and exploring the island in adventure mode. This is one of the best racing games I have ever played." - A Google user</li>
172
- <li>"Beach Buggy Racing 2 is a blast to play. It has everything you want in a kart racing game: colorful tracks, cool cars, crazy power-ups, and competitive modes. It is also very easy to control and customize your car and power-up deck. The game is suitable for all ages and skill levels. I highly recommend it to anyone who likes racing games." - AppAdvice</li>
173
- <li>"This game is a masterpiece. It has stunning graphics, realistic physics, catchy music, and a lighthearted atmosphere. It has a lot of content and features to keep you entertained for hours. It is also very challenging and rewarding. You can race against other players online or offline, compete in championships and tournaments, or create your own custom races with your own rules. This game is a must-have for any racing fan." - Steam user</li>
174
- </ul>
175
- <h3>Negative Reviews</h3>
176
- <p>Here are some of the quotes from players and critics who have criticized the game for its ads, bugs, difficulty, and repetition:</p>
177
- <ul>
178
- <li>"This game is annoying. The ads are too frequent and intrusive. They pop up every time you finish a race or open a chest. They also sometimes freeze or crash the game. The game also has some bugs and glitches that ruin the gameplay. Sometimes the cars get stuck or fly off the track. Sometimes the power-ups don't work or hit the wrong target. Sometimes the game doesn't save your progress or rewards." - A Google user</li>
179
- <li>"Beach Buggy Racing 2 is a disappointment. The game is too hard and unfair. The AI opponents are too fast and aggressive. They always use power-ups at the right time and hit you with them. They also always have better cars and power-ups than you. The game is also too repetitive and boring. The tracks are too similar and lack variety. The game modes are too limited and lack creativity." - App Store user</li>
180
- <li>"This game is a waste of time. The game is too dependent on luck and pay-to-win. The game forces you to watch ads or spend real money to get coins, gems, tickets, cars, power-ups, and more. The game also randomly gives you power-ups that are useless or harmful to you. The game also has no skill or strategy involved. You just tap the screen and hope for the best." - Amazon user</li>
181
- </ul>
182
- <h2>Conclusion</h2>
183
- <p>Beach Buggy Racing 2 is a fun and exciting kart racing game that offers a lot of features and content for players of all ages and skill levels. You can download Beach Buggy Racing 2 Mod APK Revdl to enjoy the game without any limitations or restrictions. You can unlock and upgrade all the cars and power-ups in the game, and use them to race against other players or AI opponents on different tracks and modes. You can also explore a mysterious island full of secrets and surprises, compete in championships and tournaments, or create your own custom races with your own rules. Beach Buggy Racing 2 is a game that will keep you entertained for hours with its graphics, gameplay, variety, and fun factor. If you are looking for a kart racing game that has it all, then you should download Beach Buggy Racing 2 Mod APK Revdl today!</p>
184
- <h2>FAQs</h2>
185
- <p>Here are some of the frequently asked questions about Beach Buggy Racing 2 and Beach Buggy Racing 2 Mod APK Revdl:</p>
186
- <ol>
187
- <li>Is Beach Buggy Racing 2 Mod APK Revdl safe to download and install?</li>
188
- <p>Yes, Beach Buggy Racing 2 Mod APK Revdl is safe to download and install. It does not contain any viruses, malware, or spyware that can harm your device or data. However, you should always download it from a trusted and reliable source, such as Revdl. You should also enable the option to install apps from unknown sources on your device settings before installing it.</p>
189
- <li>Is Beach Buggy Racing 2 Mod APK Revdl compatible with my device?</li>
190
- <p>Beach Buggy Racing 2 Mod APK Revdl is compatible with most Android devices that have Android 4.4 or higher. However, some devices may have different specifications or performance issues that may affect the gameplay. You should check the minimum requirements and compatibility of the game before downloading and installing it.</p>
191
- <li>How can I update Beach Buggy Racing 2 Mod APK Revdl?</li>
192
- <p>Beach Buggy Racing 2 Mod APK Revdl is updated regularly with new features and content. You can check the latest version of the game on the Revdl website or on the game's official social media pages. You can also enable the option to auto-update apps on your device settings. However, you may need to uninstall and reinstall the modded version of the game every time there is a new update.</p>
193
- <li>How can I contact the developers of Beach Buggy Racing 2?</li>
194
- <p>If you have any questions, feedback, suggestions, or issues regarding Beach Buggy Racing 2, you can contact the developers of the game by email at [email protected]. You can also visit their website at https://www.vectorunit.com/ or follow them on Facebook, Twitter, Instagram, YouTube, or Discord.</p>
195
- <li>How can I support the developers of Beach Buggy Racing 2?</li>
196
- <p>If you enjoy playing Beach Buggy Racing 2 and want to support the developers of the game, you can do so by purchasing coins, gems, tickets, cars, power-ups, and more in the game. You can also rate and review the game on Google Play Store or other platforms, share it with your friends and family, or follow them on social media.</p>
197
- </ol>
spaces/4Taps/SadTalker/src/facerender/modules/dense_motion.py DELETED
@@ -1,117 +0,0 @@
1
- from torch import nn
2
- import torch.nn.functional as F
3
- import torch
4
- from src.facerender.modules.util import Hourglass, make_coordinate_grid, kp2gaussian
5
-
6
- from src.facerender.sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d
7
-
8
-
9
- class DenseMotionNetwork(nn.Module):
10
- """
11
- Module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving
12
- """
13
-
14
- def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress,
15
- estimate_occlusion_map=False):
16
- super(DenseMotionNetwork, self).__init__()
17
- # self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(feature_channel+1), max_features=max_features, num_blocks=num_blocks)
18
- self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks)
19
-
20
- self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3)
21
-
22
- self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1)
23
- self.norm = BatchNorm3d(compress, affine=True)
24
-
25
- if estimate_occlusion_map:
26
- # self.occlusion = nn.Conv2d(reshape_channel*reshape_depth, 1, kernel_size=7, padding=3)
27
- self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)
28
- else:
29
- self.occlusion = None
30
-
31
- self.num_kp = num_kp
32
-
33
-
34
- def create_sparse_motions(self, feature, kp_driving, kp_source):
35
- bs, _, d, h, w = feature.shape
36
- identity_grid = make_coordinate_grid((d, h, w), type=kp_source['value'].type())
37
- identity_grid = identity_grid.view(1, 1, d, h, w, 3)
38
- coordinate_grid = identity_grid - kp_driving['value'].view(bs, self.num_kp, 1, 1, 1, 3)
39
-
40
- # if 'jacobian' in kp_driving:
41
- if 'jacobian' in kp_driving and kp_driving['jacobian'] is not None:
42
- jacobian = torch.matmul(kp_source['jacobian'], torch.inverse(kp_driving['jacobian']))
43
- jacobian = jacobian.unsqueeze(-3).unsqueeze(-3).unsqueeze(-3)
44
- jacobian = jacobian.repeat(1, 1, d, h, w, 1, 1)
45
- coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1))
46
- coordinate_grid = coordinate_grid.squeeze(-1)
47
-
48
-
49
- driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.num_kp, 1, 1, 1, 3) # (bs, num_kp, d, h, w, 3)
50
-
51
- #adding background feature
52
- identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)
53
- sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1) #bs num_kp+1 d h w 3
54
-
55
- # sparse_motions = driving_to_source
56
-
57
- return sparse_motions
58
-
59
- def create_deformed_feature(self, feature, sparse_motions):
60
- bs, _, d, h, w = feature.shape
61
- feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1) # (bs, num_kp+1, 1, c, d, h, w)
62
- feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w) # (bs*(num_kp+1), c, d, h, w)
63
- sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1)) # (bs*(num_kp+1), d, h, w, 3) !!!!
64
- sparse_deformed = F.grid_sample(feature_repeat, sparse_motions)
65
- sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w)) # (bs, num_kp+1, c, d, h, w)
66
- return sparse_deformed
67
-
68
- def create_heatmap_representations(self, feature, kp_driving, kp_source):
69
- spatial_size = feature.shape[3:]
70
- gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01)
71
- gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01)
72
- heatmap = gaussian_driving - gaussian_source
73
-
74
- # adding background feature
75
- zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type())
76
- heatmap = torch.cat([zeros, heatmap], dim=1)
77
- heatmap = heatmap.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)
78
- return heatmap
79
-
80
- def forward(self, feature, kp_driving, kp_source):
81
- bs, _, d, h, w = feature.shape
82
-
83
- feature = self.compress(feature)
84
- feature = self.norm(feature)
85
- feature = F.relu(feature)
86
-
87
- out_dict = dict()
88
- sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source)
89
- deformed_feature = self.create_deformed_feature(feature, sparse_motion)
90
-
91
- heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source)
92
-
93
- input_ = torch.cat([heatmap, deformed_feature], dim=2)
94
- input_ = input_.view(bs, -1, d, h, w)
95
-
96
- # input = deformed_feature.view(bs, -1, d, h, w) # (bs, num_kp+1 * c, d, h, w)
97
-
98
- prediction = self.hourglass(input_)
99
-
100
-
101
- mask = self.mask(prediction)
102
- mask = F.softmax(mask, dim=1)
103
- out_dict['mask'] = mask
104
- mask = mask.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)
105
- sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4) # (bs, num_kp+1, 3, d, h, w)
106
- deformation = (sparse_motion * mask).sum(dim=1) # (bs, 3, d, h, w)
107
- deformation = deformation.permute(0, 2, 3, 4, 1) # (bs, d, h, w, 3)
108
-
109
- out_dict['deformation'] = deformation
110
-
111
- if self.occlusion:
112
- bs, c, d, h, w = prediction.shape
113
- prediction = prediction.view(bs, -1, h, w)
114
- occlusion_map = torch.sigmoid(self.occlusion(prediction))
115
- out_dict['occlusion_map'] = occlusion_map
116
-
117
- return out_dict
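
For orientation, here is a minimal sketch of driving `DenseMotionNetwork` with dummy tensors. The hyperparameter values are illustrative rather than SadTalker's shipped configuration, the import assumes the Space's repo layout is on `PYTHONPATH`, and the repo's `util` helpers (`Hourglass`, `make_coordinate_grid`, `kp2gaussian`) are assumed to behave as their names suggest. Note that the feature depth must equal `reshape_depth` so the occlusion head's `view()` lines up.

```python
import torch
from src.facerender.modules.dense_motion import DenseMotionNetwork

# Illustrative hyperparameters: compress=4 and num_kp=15 give the Hourglass
# an input of (num_kp+1)*(compress+1) = 80 channels, matching the forward pass.
net = DenseMotionNetwork(block_expansion=32, num_blocks=4, max_features=1024,
                         num_kp=15, feature_channel=32, reshape_depth=16,
                         compress=4, estimate_occlusion_map=True)
net.eval()

bs, d, h, w = 1, 16, 64, 64                     # d must equal reshape_depth
feature = torch.randn(bs, 32, d, h, w)          # 3D appearance feature volume
kp_source = {'value': torch.rand(bs, 15, 3) * 2 - 1, 'jacobian': None}
kp_driving = {'value': torch.rand(bs, 15, 3) * 2 - 1, 'jacobian': None}

with torch.no_grad():
    out = net(feature, kp_driving, kp_source)

print(out['deformation'].shape)    # (bs, d, h, w, 3) warping grid
print(out['mask'].shape)           # (bs, num_kp+1, d, h, w) soft motion assignment
print(out['occlusion_map'].shape)  # (bs, 1, h, w)
```
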
spaces/52Hz/SRMNet_real_world_denoising/model/SRMNet.py DELETED
@@ -1,227 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
-
4
- ##---------- Basic Layers ----------
5
- def conv3x3(in_chn, out_chn, bias=True):
6
- layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
7
- return layer
8
-
9
- def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
10
- return nn.Conv2d(
11
- in_channels, out_channels, kernel_size,
12
- padding=(kernel_size // 2), bias=bias, stride=stride)
13
-
14
- def bili_resize(factor):
15
- return nn.Upsample(scale_factor=factor, mode='bilinear', align_corners=False)
16
-
17
- ##---------- Basic Blocks ----------
18
- class UNetConvBlock(nn.Module):
19
- def __init__(self, in_size, out_size, downsample):
20
- super(UNetConvBlock, self).__init__()
21
- self.downsample = downsample
22
- self.block = SK_RDB(in_channels=in_size, growth_rate=out_size, num_layers=3)
23
- if downsample:
24
- self.downsample = PS_down(out_size, out_size, downscale=2)
25
-
26
- def forward(self, x):
27
- out = self.block(x)
28
- if self.downsample:
29
- out_down = self.downsample(out)
30
- return out_down, out
31
- else:
32
- return out
33
-
34
- class UNetUpBlock(nn.Module):
35
- def __init__(self, in_size, out_size):
36
- super(UNetUpBlock, self).__init__()
37
- # self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
38
- self.up = PS_up(in_size, out_size, upscale=2)
39
- self.conv_block = UNetConvBlock(in_size, out_size, False)
40
-
41
- def forward(self, x, bridge):
42
- up = self.up(x)
43
- out = torch.cat([up, bridge], dim=1)
44
- out = self.conv_block(out)
45
- return out
46
-
47
- ##---------- Resizing Modules (Pixel(Un)Shuffle) ----------
48
- class PS_down(nn.Module):
49
- def __init__(self, in_size, out_size, downscale):
50
- super(PS_down, self).__init__()
51
- self.UnPS = nn.PixelUnshuffle(downscale)
52
- self.conv1 = nn.Conv2d((downscale**2) * in_size, out_size, 1, 1, 0)
53
-
54
- def forward(self, x):
55
- x = self.UnPS(x) # h/2, w/2, 4*c
56
- x = self.conv1(x)
57
- return x
58
-
59
- class PS_up(nn.Module):
60
- def __init__(self, in_size, out_size, upscale):
61
- super(PS_up, self).__init__()
62
-
63
- self.PS = nn.PixelShuffle(upscale)
64
- self.conv1 = nn.Conv2d(in_size//(upscale**2), out_size, 1, 1, 0)
65
-
66
- def forward(self, x):
67
- x = self.PS(x) # h/2, w/2, 4*c
68
- x = self.conv1(x)
69
- return x
70
-
71
- ##---------- Selective Kernel Feature Fusion (SKFF) ----------
72
- class SKFF(nn.Module):
73
- def __init__(self, in_channels, height=3, reduction=8, bias=False):
74
- super(SKFF, self).__init__()
75
-
76
- self.height = height
77
- d = max(int(in_channels / reduction), 4)
78
-
79
- self.avg_pool = nn.AdaptiveAvgPool2d(1)
80
- self.conv_du = nn.Sequential(nn.Conv2d(in_channels, d, 1, padding=0, bias=bias), nn.PReLU())
81
-
82
- self.fcs = nn.ModuleList([])
83
- for i in range(self.height):
84
- self.fcs.append(nn.Conv2d(d, in_channels, kernel_size=1, stride=1, bias=bias))
85
-
86
- self.softmax = nn.Softmax(dim=1)
87
-
88
- def forward(self, inp_feats):
89
- batch_size, n_feats, H, W = inp_feats[1].shape
90
-
91
- inp_feats = torch.cat(inp_feats, dim=1)
92
- inp_feats = inp_feats.view(batch_size, self.height, n_feats, inp_feats.shape[2], inp_feats.shape[3])
93
-
94
- feats_U = torch.sum(inp_feats, dim=1)
95
- feats_S = self.avg_pool(feats_U)
96
- feats_Z = self.conv_du(feats_S)
97
-
98
- attention_vectors = [fc(feats_Z) for fc in self.fcs]
99
- attention_vectors = torch.cat(attention_vectors, dim=1)
100
- attention_vectors = attention_vectors.view(batch_size, self.height, n_feats, 1, 1)
101
-
102
- attention_vectors = self.softmax(attention_vectors)
103
- feats_V = torch.sum(inp_feats * attention_vectors, dim=1)
104
-
105
- return feats_V
106
-
107
- ##---------- Dense Block ----------
108
- class DenseLayer(nn.Module):
109
- def __init__(self, in_channels, out_channels, I):
110
- super(DenseLayer, self).__init__()
111
- self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=3 // 2)
112
- self.relu = nn.ReLU(inplace=True)
113
- self.sk = SKFF(out_channels, height=2, reduction=8, bias=False)
114
-
115
- def forward(self, x):
116
- x1 = self.relu(self.conv(x))
117
- # output = torch.cat([x, x1], 1) # -> RDB
118
- output = self.sk((x, x1))
119
- return output
120
-
121
- ##---------- Selective Kernel Residual Dense Block (SK-RDB) ----------
122
- class SK_RDB(nn.Module):
123
- def __init__(self, in_channels, growth_rate, num_layers):
124
- super(SK_RDB, self).__init__()
125
- self.identity = nn.Conv2d(in_channels, growth_rate, 1, 1, 0)
126
- self.layers = nn.Sequential(
127
- *[DenseLayer(in_channels, in_channels, I=i) for i in range(num_layers)]
128
- )
129
- self.lff = nn.Conv2d(in_channels, growth_rate, kernel_size=1)
130
-
131
- def forward(self, x):
132
- res = self.identity(x)
133
- x = self.layers(x)
134
- x = self.lff(x)
135
- return res + x
136
-
137
- ##---------- testNet ----------
138
- class SRMNet(nn.Module):
139
- def __init__(self, in_chn=3, wf=96, depth=4):
140
- super(SRMNet, self).__init__()
141
- self.depth = depth
142
- self.down_path = nn.ModuleList()
143
- self.bili_down = bili_resize(0.5)
144
- self.conv_01 = nn.Conv2d(in_chn, wf, 3, 1, 1)
145
-
146
- # encoder of UNet
147
- prev_channels = 0
148
- for i in range(depth): # 0,1,2,3
149
- downsample = True if (i + 1) < depth else False
150
- self.down_path.append(UNetConvBlock(prev_channels + wf, (2 ** i) * wf, downsample))
151
- prev_channels = (2 ** i) * wf
152
-
153
- # decoder of UNet
154
- self.up_path = nn.ModuleList()
155
- self.skip_conv = nn.ModuleList()
156
- self.conv_up = nn.ModuleList()
157
- self.bottom_conv = nn.Conv2d(prev_channels, wf, 3, 1, 1)
158
- self.bottom_up = bili_resize(2 ** (depth-1))
159
-
160
- for i in reversed(range(depth - 1)):
161
- self.up_path.append(UNetUpBlock(prev_channels, (2 ** i) * wf))
162
- self.skip_conv.append(nn.Conv2d((2 ** i) * wf, (2 ** i) * wf, 3, 1, 1))
163
- self.conv_up.append(nn.Sequential(*[nn.Conv2d((2 ** i) * wf, wf, 3, 1, 1), bili_resize(2 ** i)]))
164
- prev_channels = (2 ** i) * wf
165
-
166
- self.final_ff = SKFF(in_channels=wf, height=depth)
167
- self.last = conv3x3(prev_channels, in_chn, bias=True)
168
-
169
- def forward(self, x):
170
- img = x
171
- scale_img = img
172
-
173
- ##### shallow conv #####
174
- x1 = self.conv_01(img)
175
- encs = []
176
- ######## UNet ########
177
- # Down-path (Encoder)
178
- for i, down in enumerate(self.down_path):
179
- if i == 0:
180
- x1, x1_up = down(x1)
181
- encs.append(x1_up)
182
- elif (i + 1) < self.depth:
183
- scale_img = self.bili_down(scale_img)
184
- left_bar = self.conv_01(scale_img)
185
- x1 = torch.cat([x1, left_bar], dim=1)
186
- x1, x1_up = down(x1)
187
- encs.append(x1_up)
188
- else:
189
- scale_img = self.bili_down(scale_img)
190
- left_bar = self.conv_01(scale_img)
191
- x1 = torch.cat([x1, left_bar], dim=1)
192
- x1 = down(x1)
193
-
194
- # Up-path (Decoder)
195
- ms_result = [self.bottom_up(self.bottom_conv(x1))]
196
- for i, up in enumerate(self.up_path):
197
- x1 = up(x1, self.skip_conv[i](encs[-i - 1]))
198
- ms_result.append(self.conv_up[i](x1))
199
-
200
- # Multi-scale selective feature fusion
201
- msff_result = self.final_ff(ms_result)
202
-
203
- ##### Reconstruct #####
204
- out_1 = self.last(msff_result) + img
205
-
206
- return out_1
207
-
208
-
209
- if __name__ == "__main__":
210
- from thop import profile
211
-
212
- input = torch.ones(1, 3, 256, 256, dtype=torch.float, requires_grad=False)
213
- model = SRMNet(in_chn=3, wf=96, depth=4)
214
- out = model(input)
215
- flops, params = profile(model, inputs=(input,))
216
- total = sum(p.numel() for p in model.parameters())
217
-
218
- # RDBlayer = SK_RDB(in_channels=64, growth_rate=64, num_layers=3)
219
- # print(RDBlayer)
220
- # out = RDBlayer(input)
221
- # flops, params = profile(RDBlayer, inputs=(input,))
222
-
223
- print('input shape:', input.shape)
224
- print('output shape', out.shape)
225
- print("-----------------------------------")
226
- print("Total params: %.4f M" % (total / 1e6))
227
- print("Total FLOPs: %.4f G" % (flops / 1e9))
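
Beyond the profiling self-test above, a rough inference sketch for denoising looks like the following. The checkpoint path and the `state_dict` key are assumptions to adapt to whatever weights the Space actually ships, and input sides should be multiples of 8 so the 4-level encoder/decoder shapes line up.

```python
import torch
from PIL import Image
from torchvision.transforms.functional import to_tensor, to_pil_image

from model.SRMNet import SRMNet  # assumes the Space's repo layout

model = SRMNet(in_chn=3, wf=96, depth=4)
ckpt = torch.load('checkpoints/srmnet_denoise.pth', map_location='cpu')  # hypothetical path
model.load_state_dict(ckpt.get('state_dict', ckpt))  # handle both wrapped and bare state dicts
model.eval()

img = to_tensor(Image.open('noisy.png').convert('RGB')).unsqueeze(0)  # (1, 3, H, W)
with torch.no_grad():
    clean = model(img).clamp(0, 1)   # residual prediction is added to the input internally
to_pil_image(clean.squeeze(0)).save('denoised.png')
```
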
spaces/801artistry/RVC801/Applio-RVC-Fork/utils/backups_test.py DELETED
@@ -1,138 +0,0 @@
1
-
2
- import os
3
- import shutil
4
- import hashlib
5
- import time
6
-
7
- LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
8
- WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
9
- GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup'
10
-
11
- def import_google_drive_backup():
12
- print("Importing Google Drive backup...")
13
- GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' # change this to your Google Drive path
14
- LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
15
- WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
16
- weights_exist = False
17
- files_to_copy = []
18
- weights_to_copy = []
19
-
20
- def handle_files(root, files, is_weight_files=False):
- nonlocal weights_exist # without this, the assignment below creates a local and the enclosing flag never flips
21
- for filename in files:
22
- filepath = os.path.join(root, filename)
23
- if filename.endswith('.pth') and is_weight_files:
24
- weights_exist = True
25
- backup_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
26
- else:
27
- backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
28
- backup_folderpath = os.path.dirname(backup_filepath)
29
- if not os.path.exists(backup_folderpath):
30
- os.makedirs(backup_folderpath)
31
- print(f'Created folder: {backup_folderpath}', flush=True)
32
- if is_weight_files:
33
- weights_to_copy.append((filepath, backup_filepath))
34
- else:
35
- files_to_copy.append((filepath, backup_filepath))
36
-
37
- for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'logs')):
38
- handle_files(root, files)
39
-
40
- for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'weights')):
41
- handle_files(root, files, True)
42
-
43
- # Copy files in batches
44
- total_files = len(files_to_copy)
45
- start_time = time.time()
46
- for i, (source, dest) in enumerate(files_to_copy, start=1):
47
- with open(source, 'rb') as src, open(dest, 'wb') as dst:
48
- shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size
49
- # Report progress every 5 seconds or after every 100 files, whichever is less frequent
50
- if time.time() - start_time > 5 or i % 100 == 0:
51
- print(f'\rCopying file {i} of {total_files} ({i * 100 / total_files:.2f}%)', end="")
52
- start_time = time.time()
53
- print(f'\nImported {len(files_to_copy)} files from Google Drive backup')
54
-
55
- # Copy weights in batches
56
- total_weights = len(weights_to_copy)
57
- start_time = time.time()
58
- for i, (source, dest) in enumerate(weights_to_copy, start=1):
59
- with open(source, 'rb') as src, open(dest, 'wb') as dst:
60
- shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size
61
- # Report progress every 5 seconds or after every 100 files, whichever is less frequent
62
- if time.time() - start_time > 5 or i % 100 == 0:
63
- print(f'\rCopying weight file {i} of {total_weights} ({i * 100 / total_weights:.2f}%)', end="")
64
- start_time = time.time()
65
- if weights_exist:
66
- print(f'\nImported {len(weights_to_copy)} weight files')
67
- print("Copied weights from Google Drive backup to local weights folder.")
68
- else:
69
- print("\nNo weights found in Google Drive backup.")
70
- print("Google Drive backup import completed.")
71
-
72
- def backup_files():
73
- print("\n Starting backup loop...")
74
- last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt')
75
- fully_updated = False # boolean to track if all files are up to date
76
- try:
77
- with open(last_backup_timestamps_path, 'r') as f:
78
- last_backup_timestamps = dict(line.strip().split(':') for line in f)
79
- except:
80
- last_backup_timestamps = {}
81
-
82
- while True:
83
- updated = False
84
- files_to_copy = []
85
- files_to_delete = []
86
-
87
- for root, dirs, files in os.walk(LOGS_FOLDER):
88
- for filename in files:
89
- if filename != 'last_backup_timestamps.txt':
90
- filepath = os.path.join(root, filename)
91
- if os.path.isfile(filepath):
92
- backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
93
- backup_folderpath = os.path.dirname(backup_filepath)
94
-
95
- if not os.path.exists(backup_folderpath):
96
- os.makedirs(backup_folderpath)
97
- print(f'Created backup folder: {backup_folderpath}', flush=True)
98
-
99
- # check if file has changed since last backup
100
- last_backup_timestamp = last_backup_timestamps.get(filepath)
101
- current_timestamp = os.path.getmtime(filepath)
102
- if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp:
103
- files_to_copy.append((filepath, backup_filepath)) # add to list of files to copy
104
- last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp
105
- updated = True
106
- fully_updated = False # if a file is updated, all files are not up to date
107
-
108
- # check if any files were deleted in Colab and delete them from the backup drive
109
- for filepath in list(last_backup_timestamps.keys()):
110
- if not os.path.exists(filepath):
111
- backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
112
- if os.path.exists(backup_filepath):
113
- files_to_delete.append(backup_filepath) # add to list of files to delete
114
- del last_backup_timestamps[filepath]
115
- updated = True
116
- fully_updated = False # if a file is deleted, all files are not up to date
117
-
118
- # Copy files in batches
119
- if files_to_copy:
120
- for source, dest in files_to_copy:
121
- shutil.copy2(source, dest)
122
- print(f'Copied or updated {len(files_to_copy)} files')
123
-
124
- # Delete files in batches
125
- if files_to_delete:
126
- for file in files_to_delete:
127
- os.remove(file)
128
- print(f'Deleted {len(files_to_delete)} files')
129
-
130
- if not updated and not fully_updated:
131
- print("Files are up to date.")
132
- fully_updated = True # if all files are up to date, set the boolean to True
133
- copy_weights_folder_to_drive()
134
-
135
- with open(last_backup_timestamps_path, 'w') as f:
136
- for filepath, timestamp in last_backup_timestamps.items():
137
- f.write(f'{filepath}:{timestamp}\n')
138
- time.sleep(15) # wait for 15 seconds before checking again
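
Note that `backup_files()` calls `copy_weights_folder_to_drive()`, which is not defined in this module. A minimal stand-in consistent with the surrounding code might look like the sketch below; the mirroring behaviour is an assumption, not the fork's actual implementation, and it relies on the module-level `WEIGHTS_FOLDER` and `GOOGLE_DRIVE_PATH` constants.

```python
import os
import shutil

def copy_weights_folder_to_drive():
    """Mirror the local weights folder into the Google Drive backup (assumed behaviour)."""
    dest_root = os.path.join(GOOGLE_DRIVE_PATH, 'weights')
    for root, _, files in os.walk(WEIGHTS_FOLDER):
        for name in files:
            src = os.path.join(root, name)
            rel = os.path.relpath(src, WEIGHTS_FOLDER)
            dst = os.path.join(dest_root, rel)
            os.makedirs(os.path.dirname(dst), exist_ok=True)
            shutil.copy2(src, dst)  # copy2 preserves mtimes, matching the timestamp checks above
```
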
spaces/A-Celsius/Caption-Generator/app.py DELETED
@@ -1,48 +0,0 @@
1
- from PIL import Image
2
- from transformers import BlipProcessor, BlipForConditionalGeneration
3
- import torch
4
- import gradio as gr
5
-
6
- model_name = "Salesforce/blip-image-captioning-base"
7
-
8
- caption_processor = BlipProcessor.from_pretrained(model_name)
9
- model = BlipForConditionalGeneration.from_pretrained(model_name)
10
-
11
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
12
- model.to(device)
13
-
14
- def generate_captions(image, num_captions=5,size=(512, 512)):
15
- image = image.resize(size)
16
- if image.mode != 'RGB':
17
- image = image.convert('RGB')
18
- pixel_values = caption_processor(image, return_tensors='pt').to(device)
19
-
20
- caption_ids = model.generate(
21
- **pixel_values,
22
- max_length=30,
23
- num_beams=5,
24
- num_return_sequences=num_captions,
25
- temperature=1.0
26
- )
27
-
28
- captions = [
29
- caption_processor.decode(ids, skip_special_tokens=True)
30
- for ids in caption_ids
31
- ]
32
-
33
- return captions
34
-
35
- from gradio.components import Image, Textbox,Slider
36
-
37
- interface = gr.Interface(
38
- fn=generate_captions,
39
- inputs=[
40
- Image(type="pil", label="Input Image"),
41
- Slider(minimum=1, maximum=5, step=1, label="Number of Captions")
42
- ],
43
- outputs=Textbox(type="text", label="Captions"),
44
- title="Image Caption Generator",
45
- description="AI tool that creates captions based on the image provided by the user.",
46
- )
47
-
48
- interface.launch()
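
The captioning function can also be smoke-tested without launching the Gradio UI, for example from the same module after commenting out `interface.launch()`. The image path is a placeholder.

```python
from PIL import Image

image = Image.open('example.jpg')                 # any local image
captions = generate_captions(image, num_captions=3)
for i, caption in enumerate(captions, 1):
    print(f'{i}. {caption}')                      # three beam-search candidates
```
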
spaces/AHzizi/WaifuVoiceGen/commons.py DELETED
@@ -1,172 +0,0 @@
1
- import math
2
- import torch
3
- from torch.nn import functional as F
4
- import torch.jit
5
-
6
-
7
- def script_method(fn, _rcb=None):
8
- return fn
9
-
10
-
11
- def script(obj, optimize=True, _frames_up=0, _rcb=None):
12
- return obj
13
-
14
-
15
- torch.jit.script_method = script_method
16
- torch.jit.script = script
17
-
18
-
19
- def init_weights(m, mean=0.0, std=0.01):
20
- classname = m.__class__.__name__
21
- if classname.find("Conv") != -1:
22
- m.weight.data.normal_(mean, std)
23
-
24
-
25
- def get_padding(kernel_size, dilation=1):
26
- return int((kernel_size*dilation - dilation)/2)
27
-
28
-
29
- def convert_pad_shape(pad_shape):
30
- l = pad_shape[::-1]
31
- pad_shape = [item for sublist in l for item in sublist]
32
- return pad_shape
33
-
34
-
35
- def intersperse(lst, item):
36
- result = [item] * (len(lst) * 2 + 1)
37
- result[1::2] = lst
38
- return result
39
-
40
-
41
- def kl_divergence(m_p, logs_p, m_q, logs_q):
42
- """KL(P||Q)"""
43
- kl = (logs_q - logs_p) - 0.5
44
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
45
- return kl
46
-
47
-
48
- def rand_gumbel(shape):
49
- """Sample from the Gumbel distribution, protect from overflows."""
50
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
51
- return -torch.log(-torch.log(uniform_samples))
52
-
53
-
54
- def rand_gumbel_like(x):
55
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
56
- return g
57
-
58
-
59
- def slice_segments(x, ids_str, segment_size=4):
60
- ret = torch.zeros_like(x[:, :, :segment_size])
61
- for i in range(x.size(0)):
62
- idx_str = ids_str[i]
63
- idx_end = idx_str + segment_size
64
- ret[i] = x[i, :, idx_str:idx_end]
65
- return ret
66
-
67
-
68
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
69
- b, d, t = x.size()
70
- if x_lengths is None:
71
- x_lengths = t
72
- ids_str_max = x_lengths - segment_size + 1
73
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
74
- ret = slice_segments(x, ids_str, segment_size)
75
- return ret, ids_str
76
-
77
-
78
- def get_timing_signal_1d(
79
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
80
- position = torch.arange(length, dtype=torch.float)
81
- num_timescales = channels // 2
82
- log_timescale_increment = (
83
- math.log(float(max_timescale) / float(min_timescale)) /
84
- (num_timescales - 1))
85
- inv_timescales = min_timescale * torch.exp(
86
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
87
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
88
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
89
- signal = F.pad(signal, [0, 0, 0, channels % 2])
90
- signal = signal.view(1, channels, length)
91
- return signal
92
-
93
-
94
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
95
- b, channels, length = x.size()
96
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
97
- return x + signal.to(dtype=x.dtype, device=x.device)
98
-
99
-
100
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
101
- b, channels, length = x.size()
102
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
103
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
104
-
105
-
106
- def subsequent_mask(length):
107
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
108
- return mask
109
-
110
-
111
- @torch.jit.script
112
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
113
- n_channels_int = n_channels[0]
114
- in_act = input_a + input_b
115
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
116
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
117
- acts = t_act * s_act
118
- return acts
119
-
120
-
121
- def convert_pad_shape(pad_shape):
122
- l = pad_shape[::-1]
123
- pad_shape = [item for sublist in l for item in sublist]
124
- return pad_shape
125
-
126
-
127
- def shift_1d(x):
128
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
129
- return x
130
-
131
-
132
- def sequence_mask(length, max_length=None):
133
- if max_length is None:
134
- max_length = length.max()
135
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
136
- return x.unsqueeze(0) < length.unsqueeze(1)
137
-
138
-
139
- def generate_path(duration, mask):
140
- """
141
- duration: [b, 1, t_x]
142
- mask: [b, 1, t_y, t_x]
143
- """
144
- device = duration.device
145
-
146
- b, _, t_y, t_x = mask.shape
147
- cum_duration = torch.cumsum(duration, -1)
148
-
149
- cum_duration_flat = cum_duration.view(b * t_x)
150
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
151
- path = path.view(b, t_x, t_y)
152
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
153
- path = path.unsqueeze(1).transpose(2,3) * mask
154
- return path
155
-
156
-
157
- def clip_grad_value_(parameters, clip_value, norm_type=2):
158
- if isinstance(parameters, torch.Tensor):
159
- parameters = [parameters]
160
- parameters = list(filter(lambda p: p.grad is not None, parameters))
161
- norm_type = float(norm_type)
162
- if clip_value is not None:
163
- clip_value = float(clip_value)
164
-
165
- total_norm = 0
166
- for p in parameters:
167
- param_norm = p.grad.data.norm(norm_type)
168
- total_norm += param_norm.item() ** norm_type
169
- if clip_value is not None:
170
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
171
- total_norm = total_norm ** (1. / norm_type)
172
- return total_norm
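
A quick demonstration of two of the helpers above, assuming they are in scope: `sequence_mask` turns per-example lengths into a boolean padding mask, and `rand_slice_segments` crops random fixed-size windows from padded batches for training.

```python
import torch

lengths = torch.tensor([3, 5])
print(sequence_mask(lengths))
# tensor([[ True,  True,  True, False, False],
#         [ True,  True,  True,  True,  True]])

x = torch.randn(2, 80, 100)   # (batch, channels, frames), e.g. mel spectrograms
segments, ids = rand_slice_segments(x, x_lengths=torch.tensor([100, 90]),
                                    segment_size=32)
print(segments.shape, ids)    # torch.Size([2, 80, 32]) plus the random start indices
```
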
spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/config.py DELETED
@@ -1,17 +0,0 @@
1
- import os
2
-
3
- SMPL_DATA_PATH = "./body_models/smpl"
4
-
5
- SMPL_KINTREE_PATH = os.path.join(SMPL_DATA_PATH, "kintree_table.pkl")
6
- SMPL_MODEL_PATH = os.path.join(SMPL_DATA_PATH, "SMPL_NEUTRAL.pkl")
7
- JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(SMPL_DATA_PATH, 'J_regressor_extra.npy')
8
-
9
- ROT_CONVENTION_TO_ROT_NUMBER = {
10
- 'legacy': 23,
11
- 'no_hands': 21,
12
- 'full_hands': 51,
13
- 'mitten_hands': 33,
14
- }
15
-
16
- GENDERS = ['neutral', 'male', 'female']
17
- NUM_BETAS = 10
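
A small usage sketch of these constants, assuming they are imported as a module; how the surrounding repo actually consumes them may differ.

```python
num_rots = ROT_CONVENTION_TO_ROT_NUMBER['no_hands']  # 21 body rotations, no hand joints
gender = GENDERS[0]                                  # 'neutral'
print(f'SMPL model: {SMPL_MODEL_PATH}, {num_rots} rotations, '
      f'gender={gender}, {NUM_BETAS} shape betas')
```
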
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/contperceptual_dis.py DELETED
@@ -1,137 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- import sys
5
-
6
- sys.path.insert(0, '.') # nopep8
7
- from ldm.modules.losses_audio.vqperceptual import *
8
- from ldm.modules.discriminator.multi_window_disc import Discriminator
9
-
10
- class LPAPSWithDiscriminator(nn.Module):# 相比于contperceptual.py添加了MultiWindowDiscriminator
11
- def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
12
- disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
13
- perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
14
- disc_loss="hinge"):
15
-
16
- super().__init__()
17
- assert disc_loss in ["hinge", "vanilla"]
18
- self.kl_weight = kl_weight
19
- self.pixel_weight = pixelloss_weight
20
- self.perceptual_loss = LPAPS().eval()
21
- self.perceptual_weight = perceptual_weight
22
- # output log variance
23
- self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
24
-
25
- self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
26
- n_layers=disc_num_layers,
27
- use_actnorm=use_actnorm,
28
- ).apply(weights_init)
29
- self.discriminator_iter_start = disc_start
30
- if disc_loss == "hinge":
31
- self.disc_loss = hinge_d_loss
32
- elif disc_loss == "vanilla":
33
- self.disc_loss = vanilla_d_loss
34
- else:
35
- raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
36
- print(f"LPAPSWithDiscriminator running with {disc_loss} loss.")
37
- self.disc_factor = disc_factor
38
- self.discriminator_weight = disc_weight
39
- self.disc_conditional = disc_conditional
40
-
41
- disc_win_num = 3
42
- mel_disc_hidden_size = 128
43
- self.discriminator_multi = Discriminator(time_lengths=[32, 64, 128][:disc_win_num],
44
- freq_length=80, hidden_size=mel_disc_hidden_size, kernel=(3, 3),
45
- cond_size=0, norm_type="in", reduction="stack")
46
-
47
- def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
48
- if last_layer is not None:
49
- nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
50
- g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
51
- else:
52
- nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
53
- g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
54
-
55
- d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
56
- d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
57
- d_weight = d_weight * self.discriminator_weight
58
- return d_weight
59
-
60
- def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
61
- global_step, last_layer=None, cond=None, split="train", weights=None):
62
- rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
63
- if self.perceptual_weight > 0:
64
- p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
65
- rec_loss = rec_loss + self.perceptual_weight * p_loss
66
- else:
67
- p_loss = torch.tensor([0.0])
68
-
69
- nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
70
- weighted_nll_loss = nll_loss
71
- if weights is not None:
72
- weighted_nll_loss = weights*nll_loss
73
- weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
74
- nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
75
- kl_loss = posteriors.kl()
76
- kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
77
-
78
- # now the GAN part
79
- if optimizer_idx == 0:
80
- # generator update
81
- if cond is None:
82
- assert not self.disc_conditional
83
- logits_fake = self.discriminator(reconstructions.contiguous())
84
- else:
85
- assert self.disc_conditional
86
- logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
87
-
88
- logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().squeeze(1).transpose(1, 2))
89
-
90
- g_loss = -torch.mean(logits_fake)
91
- g_loss_multi = -torch.mean(logits_fake_multi['y'])
92
-
93
- try:
94
- d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
95
- d_weight_multi = self.calculate_adaptive_weight(nll_loss, g_loss_multi, last_layer=last_layer)
96
- except RuntimeError:
97
- assert not self.training
98
- d_weight = d_weight_multi = torch.tensor(0.0)
99
-
100
- disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
101
- loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + d_weight_multi * disc_factor * g_loss_multi
102
-
103
- log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
104
- "{}/logvar".format(split): self.logvar.detach(),
105
- "{}/kl_loss".format(split): kl_loss.detach().mean(),
106
- "{}/nll_loss".format(split): nll_loss.detach().mean(),
107
- "{}/rec_loss".format(split): rec_loss.detach().mean(),
108
- "{}/d_weight".format(split): d_weight.detach(),
109
- "{}/disc_factor".format(split): torch.tensor(disc_factor),
110
- "{}/g_loss".format(split): g_loss.detach().mean(),
111
- "{}/g_loss_multi".format(split): g_loss_multi.detach().mean(),
112
- }
113
- return loss, log
114
-
115
- if optimizer_idx == 1:
116
- # second pass for discriminator update
117
- if cond is None:
118
- logits_real = self.discriminator(inputs.contiguous().detach())
119
- logits_fake = self.discriminator(reconstructions.contiguous().detach())
120
- else:
121
- logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
122
- logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
123
-
124
- logits_real_multi = self.discriminator_multi(inputs.contiguous().detach().squeeze(1).transpose(1, 2))
125
- logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().detach().squeeze(1).transpose(1, 2))
126
-
127
- disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
128
- d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
129
- d_loss_multi = disc_factor * self.disc_loss(logits_real_multi['y'], logits_fake_multi['y'])
130
-
131
- log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
132
- "{}/disc_loss_multi".format(split): d_loss_multi.clone().detach().mean(),
133
- "{}/logits_real".format(split): logits_real.detach().mean(),
134
- "{}/logits_fake".format(split): logits_fake.detach().mean()
135
- }
136
- return d_loss+d_loss_multi, log
137
-
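
For context, the two-pass loss above is typically driven from an autoencoder training step along these lines. Here `vae`, `posterior` (which must expose `.kl()`, as with LDM's DiagonalGaussianDistribution), the optimizers, and `step` are placeholders for whatever the surrounding training loop provides; the hyperparameters are illustrative.

```python
loss_fn = LPAPSWithDiscriminator(disc_start=50001, kl_weight=1e-6, disc_weight=0.5)

reconstructions, posterior = vae(inputs)   # inputs: (bs, 1, T, n_mels) mel tensors

# Pass 0: update the generator (the autoencoder); adversarial terms kick in
# only after global_step reaches disc_start.
aeloss, log_ae = loss_fn(inputs, reconstructions, posterior, optimizer_idx=0,
                         global_step=step, last_layer=vae.decoder.conv_out.weight,
                         split="train")
opt_ae.zero_grad(); aeloss.backward(); opt_ae.step()

# Pass 1: update both discriminators; the loss detaches the reconstructions
# internally, so they can be reused from the same forward pass.
discloss, log_disc = loss_fn(inputs, reconstructions, posterior, optimizer_idx=1,
                             global_step=step, last_layer=vae.decoder.conv_out.weight,
                             split="train")
opt_disc.zero_grad(); discloss.backward(); opt_disc.step()
```
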
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/factory.py DELETED
@@ -1,257 +0,0 @@
1
- import json
2
- import logging
3
- import os
4
- import pathlib
5
- import re
6
- from copy import deepcopy
7
- from pathlib import Path
8
-
9
- import torch
10
-
11
- from .model import CLAP, convert_weights_to_fp16
12
- from .openai import load_openai_model
13
- from .pretrained import get_pretrained_url, download_pretrained
14
- from .transform import image_transform
15
-
16
- _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
17
- _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
18
-
19
-
20
- def _natural_key(string_):
21
- return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
22
-
23
-
24
- def _rescan_model_configs():
25
- global _MODEL_CONFIGS
26
-
27
- config_ext = (".json",)
28
- config_files = []
29
- for config_path in _MODEL_CONFIG_PATHS:
30
- if config_path.is_file() and config_path.suffix in config_ext:
31
- config_files.append(config_path)
32
- elif config_path.is_dir():
33
- for ext in config_ext:
34
- config_files.extend(config_path.glob(f"*{ext}"))
35
-
36
- for cf in config_files:
37
- with open(cf, "r") as f:
38
- model_cfg = json.load(f)
39
- if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
40
- _MODEL_CONFIGS[cf.stem] = model_cfg
41
-
42
- _MODEL_CONFIGS = {
43
- k: v
44
- for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
45
- }
46
-
47
-
48
- _rescan_model_configs() # initial populate of model config registry
49
-
50
-
51
- def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
52
- checkpoint = torch.load(checkpoint_path, map_location=map_location)
53
- if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
54
- state_dict = checkpoint["state_dict"]
55
- else:
56
- state_dict = checkpoint
57
- if skip_params:
58
- if next(iter(state_dict.items()))[0].startswith("module"):
59
- state_dict = {k[7:]: v for k, v in state_dict.items()}
60
- # for k in state_dict:
61
- # if k.startswith('transformer'):
62
- # v = state_dict.pop(k)
63
- # state_dict['text_branch.' + k[12:]] = v
64
- return state_dict
65
-
66
-
67
- def create_model(
68
- amodel_name: str,
69
- tmodel_name: str,
70
- pretrained: str = "",
71
- precision: str = "fp32",
72
- device: torch.device = torch.device("cpu"),
73
- jit: bool = False,
74
- force_quick_gelu: bool = False,
75
- openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"),
76
- skip_params=True,
77
- pretrained_audio: str = "",
78
- pretrained_text: str = "",
79
- enable_fusion: bool = False,
80
- fusion_type: str = 'None'
81
- # pretrained_image: bool = False,
82
- ):
83
- amodel_name = amodel_name.replace(
84
- "/", "-"
85
- ) # for callers using old naming with / in ViT names
86
- pretrained_orig = pretrained
87
- pretrained = pretrained.lower()
88
- if pretrained == "openai":
89
- if amodel_name in _MODEL_CONFIGS:
90
- logging.info(f"Loading {amodel_name} model config.")
91
- model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
92
- else:
93
- logging.error(
94
- f"Model config for {amodel_name} not found; available models {list_models()}."
95
- )
96
- raise RuntimeError(f"Model config for {amodel_name} not found.")
97
-
98
- logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
99
- # Hard Code in model name
100
-        model_cfg["text_cfg"]["model_type"] = tmodel_name
-        model = load_openai_model(
-            "ViT-B-16",
-            model_cfg,
-            device=device,
-            jit=jit,
-            cache_dir=openai_model_cache_dir,
-            enable_fusion=enable_fusion,
-            fusion_type=fusion_type
-        )
-        # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
-        if precision == "amp" or precision == "fp32":
-            model = model.float()
-    else:
-        if amodel_name in _MODEL_CONFIGS:
-            logging.info(f"Loading {amodel_name} model config.")
-            model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
-        else:
-            logging.error(
-                f"Model config for {amodel_name} not found; available models {list_models()}."
-            )
-            raise RuntimeError(f"Model config for {amodel_name} not found.")
-
-        if force_quick_gelu:
-            # override for use of QuickGELU on non-OpenAI transformer models
-            model_cfg["quick_gelu"] = True
-
-        # if pretrained_image:
-        #     if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
-        #         # pretrained weight loading for timm models set via vision_cfg
-        #         model_cfg['vision_cfg']['timm_model_pretrained'] = True
-        #     else:
-        #         assert False, 'pretrained image towers currently only supported for timm models'
-        model_cfg["text_cfg"]["model_type"] = tmodel_name
-        model_cfg["enable_fusion"] = enable_fusion
-        model_cfg["fusion_type"] = fusion_type
-        model = CLAP(**model_cfg)
-
-        if pretrained:
-            checkpoint_path = ""
-            url = get_pretrained_url(amodel_name, pretrained)
-            if url:
-                checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
-            elif os.path.exists(pretrained_orig):
-                checkpoint_path = pretrained_orig
-            if checkpoint_path:
-                logging.info(f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained}).")
-                ckpt = load_state_dict(checkpoint_path, skip_params=True)
-                model.load_state_dict(ckpt)
-                param_names = [n for n, p in model.named_parameters()]
-                for n in param_names:
-                    print(n, "\t", "Loaded" if n in ckpt else "Unloaded")
-            else:
-                logging.warning(
-                    f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
-                )
-                raise RuntimeError(
-                    f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
-                )
-
-        if pretrained_audio:
-            if amodel_name.startswith('PANN'):
-                if 'Cnn14_mAP' in pretrained_audio:  # official checkpoint
-                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                    audio_ckpt = audio_ckpt['model']
-                    keys = list(audio_ckpt.keys())
-                    for key in keys:
-                        if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key:
-                            v = audio_ckpt.pop(key)
-                            audio_ckpt['audio_branch.' + key] = v
-                elif os.path.basename(pretrained_audio).startswith('PANN'):  # checkpoint trained via HTSAT codebase
-                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                    audio_ckpt = audio_ckpt['state_dict']
-                    keys = list(audio_ckpt.keys())
-                    for key in keys:
-                        if key.startswith('sed_model'):
-                            v = audio_ckpt.pop(key)
-                            audio_ckpt['audio_branch.' + key[10:]] = v
-                elif os.path.basename(pretrained_audio).startswith('finetuned'):  # checkpoint trained via linear probe codebase
-                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                else:
-                    raise ValueError('Unknown audio checkpoint')
-            elif amodel_name.startswith('HTSAT'):
-                if 'HTSAT_AudioSet_Saved' in pretrained_audio:  # official checkpoint
-                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                    audio_ckpt = audio_ckpt['state_dict']
-                    keys = list(audio_ckpt.keys())
-                    for key in keys:
-                        if key.startswith('sed_model') and ('spectrogram_extractor' not in key
-                                                            and 'logmel_extractor' not in key):
-                            v = audio_ckpt.pop(key)
-                            audio_ckpt['audio_branch.' + key[10:]] = v
-                elif os.path.basename(pretrained_audio).startswith('HTSAT'):  # checkpoint trained via HTSAT codebase
-                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                    audio_ckpt = audio_ckpt['state_dict']
-                    keys = list(audio_ckpt.keys())
-                    for key in keys:
-                        if key.startswith('sed_model'):
-                            v = audio_ckpt.pop(key)
-                            audio_ckpt['audio_branch.' + key[10:]] = v
-                elif os.path.basename(pretrained_audio).startswith('finetuned'):  # checkpoint trained via linear probe codebase
-                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                else:
-                    raise ValueError('Unknown audio checkpoint')
-            else:
-                raise ValueError('This audio encoder pretrained checkpoint is not supported.')  # fixed: raising a bare string is invalid in Python 3
-
-            model.load_state_dict(audio_ckpt, strict=False)
-            logging.info(f"Loading pretrained {amodel_name} weights ({pretrained_audio}).")
-            param_names = [n for n, p in model.named_parameters()]
-            for n in param_names:
-                print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
-
-    model.to(device=device)
-    if precision == "fp16":
-        assert device.type != "cpu"
-        convert_weights_to_fp16(model)
-
-    if jit:
-        model = torch.jit.script(model)
-
-    return model, model_cfg
-
-
-def create_model_and_transforms(
-    model_name: str,
-    pretrained: str = "",
-    precision: str = "fp32",
-    device: torch.device = torch.device("cpu"),
-    jit: bool = False,
-    force_quick_gelu: bool = False,
-    # pretrained_image: bool = False,
-):
-    model = create_model(
-        model_name,
-        pretrained,
-        precision,
-        device,
-        jit,
-        force_quick_gelu=force_quick_gelu,
-        # pretrained_image=pretrained_image
-    )
-    preprocess_train = image_transform(model.visual.image_size, is_train=True)
-    preprocess_val = image_transform(model.visual.image_size, is_train=False)
-    return model, preprocess_train, preprocess_val
-
-
-def list_models():
-    """enumerate available model architectures based on config files"""
-    return list(_MODEL_CONFIGS.keys())
-
-
-def add_model_config(path):
-    """add model config path or file and update registry"""
-    if not isinstance(path, Path):
-        path = Path(path)
-    _MODEL_CONFIG_PATHS.append(path)
-    _rescan_model_configs()
spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/vit.py DELETED
@@ -1,395 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-
- Based on timm code base
- https://github.com/rwightman/pytorch-image-models/tree/master/timm
-"""
-
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from functools import partial
-
-from timm.models.vision_transformer import _cfg, PatchEmbed
-from timm.models.registry import register_model
-from timm.models.layers import trunc_normal_, DropPath
-from timm.models.helpers import named_apply, adapt_input_conv
-
-
-class Mlp(nn.Module):
-    """MLP as used in Vision Transformer, MLP-Mixer and related networks"""
-    def __init__(
-        self,
-        in_features,
-        hidden_features=None,
-        out_features=None,
-        act_layer=nn.GELU,
-        drop=0.0,
-    ):
-        super().__init__()
-        out_features = out_features or in_features
-        hidden_features = hidden_features or in_features
-        self.fc1 = nn.Linear(in_features, hidden_features)
-        self.act = act_layer()
-        self.fc2 = nn.Linear(hidden_features, out_features)
-        self.drop = nn.Dropout(drop)
-
-    def forward(self, x):
-        x = self.fc1(x)
-        x = self.act(x)
-        x = self.drop(x)
-        x = self.fc2(x)
-        x = self.drop(x)
-        return x
-
-
-class Attention(nn.Module):
-    def __init__(
-        self,
-        dim,
-        num_heads=8,
-        qkv_bias=False,
-        qk_scale=None,
-        attn_drop=0.0,
-        proj_drop=0.0,
-    ):
-        super().__init__()
-        self.num_heads = num_heads
-        head_dim = dim // num_heads
-        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
-        self.scale = qk_scale or head_dim**-0.5
-        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
-        self.attn_drop = nn.Dropout(attn_drop)
-        self.proj = nn.Linear(dim, dim)
-        self.proj_drop = nn.Dropout(proj_drop)
-        self.attn_gradients = None
-        self.attention_map = None
-
-    def save_attn_gradients(self, attn_gradients):
-        self.attn_gradients = attn_gradients
-
-    def get_attn_gradients(self):
-        return self.attn_gradients
-
-    def save_attention_map(self, attention_map):
-        self.attention_map = attention_map
-
-    def get_attention_map(self):
-        return self.attention_map
-
-    def forward(self, x, register_hook=False):
-        B, N, C = x.shape
-        qkv = (self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4))
-        q, k, v = (
-            qkv[0],
-            qkv[1],
-            qkv[2],
-        )  # make torchscript happy (cannot use tensor as tuple)
-
-        attn = (q @ k.transpose(-2, -1)) * self.scale
-        attn = attn.softmax(dim=-1)
-        attn = self.attn_drop(attn)
-
-        if register_hook:
-            self.save_attention_map(attn)
-            attn.register_hook(self.save_attn_gradients)
-
-        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
-        x = self.proj(x)
-        x = self.proj_drop(x)
-        return x
-
-
-class Block(nn.Module):
-    def __init__(
-        self,
-        dim,
-        num_heads,
-        mlp_ratio=4.0,
-        qkv_bias=False,
-        qk_scale=None,
-        drop=0.0,
-        attn_drop=0.0,
-        drop_path=0.0,
-        act_layer=nn.GELU,
-        norm_layer=nn.LayerNorm,
-        use_grad_checkpointing=False,
-    ):
-        super().__init__()
-        self.norm1 = norm_layer(dim)
-        self.attn = Attention(
-            dim,
-            num_heads=num_heads,
-            qkv_bias=qkv_bias,
-            qk_scale=qk_scale,
-            attn_drop=attn_drop,
-            proj_drop=drop,
-        )
-        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
-        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
-        self.norm2 = norm_layer(dim)
-        mlp_hidden_dim = int(dim * mlp_ratio)
-        self.mlp = Mlp(
-            in_features=dim,
-            hidden_features=mlp_hidden_dim,
-            act_layer=act_layer,
-            drop=drop,
-        )
-
-        # if use_grad_checkpointing:
-        #     self.attn = checkpoint_wrapper(self.attn)
-        #     self.mlp = checkpoint_wrapper(self.mlp)
-
-    def forward(self, x, register_hook=False):
-        x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
-        x = x + self.drop_path(self.mlp(self.norm2(x)))
-        return x
-
-
-class VisionTransformer(nn.Module):
-    """Vision Transformer
-    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
-    https://arxiv.org/abs/2010.11929
-    """
-    def __init__(
-        self,
-        img_size=224,
-        patch_size=16,
-        in_chans=3,
-        num_classes=1000,
-        embed_dim=768,
-        depth=12,
-        num_heads=12,
-        mlp_ratio=4.0,
-        qkv_bias=True,
-        qk_scale=None,
-        representation_size=None,
-        drop_rate=0.0,
-        attn_drop_rate=0.0,
-        drop_path_rate=0.0,
-        norm_layer=None,
-        use_grad_checkpointing=False,
-        ckpt_layer=0,
-    ):
-        """
-        Args:
-            img_size (int, tuple): input image size
-            patch_size (int, tuple): patch size
-            in_chans (int): number of input channels
-            num_classes (int): number of classes for classification head
-            embed_dim (int): embedding dimension
-            depth (int): depth of transformer
-            num_heads (int): number of attention heads
-            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
-            qkv_bias (bool): enable bias for qkv if True
-            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
-            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
-            drop_rate (float): dropout rate
-            attn_drop_rate (float): attention dropout rate
-            drop_path_rate (float): stochastic depth rate
-            norm_layer: (nn.Module): normalization layer
-        """
-        super().__init__()
-        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
-        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
-
-        self.patch_embed = PatchEmbed(
-            img_size=img_size,
-            patch_size=patch_size,
-            in_chans=in_chans,
-            embed_dim=embed_dim,
-        )
-
-        num_patches = self.patch_embed.num_patches
-
-        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
-        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
-        self.pos_drop = nn.Dropout(p=drop_rate)
-
-        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
-        self.blocks = nn.ModuleList([
-            Block(
-                dim=embed_dim,
-                num_heads=num_heads,
-                mlp_ratio=mlp_ratio,
-                qkv_bias=qkv_bias,
-                qk_scale=qk_scale,
-                drop=drop_rate,
-                attn_drop=attn_drop_rate,
-                drop_path=dpr[i],
-                norm_layer=norm_layer,
-                use_grad_checkpointing=(use_grad_checkpointing and i >= depth - ckpt_layer),
-            ) for i in range(depth)
-        ])
-        self.norm = norm_layer(embed_dim)
-
-        trunc_normal_(self.pos_embed, std=0.02)
-        trunc_normal_(self.cls_token, std=0.02)
-        self.apply(self._init_weights)
-
-    def _init_weights(self, m):
-        if isinstance(m, nn.Linear):
-            trunc_normal_(m.weight, std=0.02)
-            if isinstance(m, nn.Linear) and m.bias is not None:
-                nn.init.constant_(m.bias, 0)
-        elif isinstance(m, nn.LayerNorm):
-            nn.init.constant_(m.bias, 0)
-            nn.init.constant_(m.weight, 1.0)
-
-    @torch.jit.ignore
-    def no_weight_decay(self):
-        return {"pos_embed", "cls_token"}
-
-    def forward(self, x, register_blk=-1):
-        B = x.shape[0]
-        x = self.patch_embed(x)
-
-        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
-        x = torch.cat((cls_tokens, x), dim=1)
-
-        x = x + self.pos_embed[:, :x.size(1), :]
-        x = self.pos_drop(x)
-
-        for i, blk in enumerate(self.blocks):
-            x = blk(x, register_blk == i)
-        x = self.norm(x)
-
-        return x
-
-    @torch.jit.ignore()
-    def load_pretrained(self, checkpoint_path, prefix=""):
-        _load_weights(self, checkpoint_path, prefix)
-
-
-@torch.no_grad()
-def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ""):
-    """Load weights from .npz checkpoints for official Google Brain Flax implementation"""
-    import numpy as np
-
-    def _n2p(w, t=True):
-        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
-            w = w.flatten()
-        if t:
-            if w.ndim == 4:
-                w = w.transpose([3, 2, 0, 1])
-            elif w.ndim == 3:
-                w = w.transpose([2, 0, 1])
-            elif w.ndim == 2:
-                w = w.transpose([1, 0])
-        return torch.from_numpy(w)
-
-    w = np.load(checkpoint_path)
-    if not prefix and "opt/target/embedding/kernel" in w:
-        prefix = "opt/target/"
-
-    if hasattr(model.patch_embed, "backbone"):
-        # hybrid
-        backbone = model.patch_embed.backbone
-        stem_only = not hasattr(backbone, "stem")
-        stem = backbone if stem_only else backbone.stem
-        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f"{prefix}conv_root/kernel"])))
-        stem.norm.weight.copy_(_n2p(w[f"{prefix}gn_root/scale"]))
-        stem.norm.bias.copy_(_n2p(w[f"{prefix}gn_root/bias"]))
-        if not stem_only:
-            for i, stage in enumerate(backbone.stages):
-                for j, block in enumerate(stage.blocks):
-                    bp = f"{prefix}block{i + 1}/unit{j + 1}/"
-                    for r in range(3):
-                        getattr(block, f"conv{r + 1}").weight.copy_(_n2p(w[f"{bp}conv{r + 1}/kernel"]))
-                        getattr(block, f"norm{r + 1}").weight.copy_(_n2p(w[f"{bp}gn{r + 1}/scale"]))
-                        getattr(block, f"norm{r + 1}").bias.copy_(_n2p(w[f"{bp}gn{r + 1}/bias"]))
-                    if block.downsample is not None:
-                        block.downsample.conv.weight.copy_(_n2p(w[f"{bp}conv_proj/kernel"]))
-                        block.downsample.norm.weight.copy_(_n2p(w[f"{bp}gn_proj/scale"]))
-                        block.downsample.norm.bias.copy_(_n2p(w[f"{bp}gn_proj/bias"]))
-        embed_conv_w = _n2p(w[f"{prefix}embedding/kernel"])
-    else:
-        embed_conv_w = adapt_input_conv(model.patch_embed.proj.weight.shape[1], _n2p(w[f"{prefix}embedding/kernel"]))
-    model.patch_embed.proj.weight.copy_(embed_conv_w)
-    model.patch_embed.proj.bias.copy_(_n2p(w[f"{prefix}embedding/bias"]))
-    model.cls_token.copy_(_n2p(w[f"{prefix}cls"], t=False))
-    pos_embed_w = _n2p(w[f"{prefix}Transformer/posembed_input/pos_embedding"], t=False)
-    if pos_embed_w.shape != model.pos_embed.shape:
-        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights
-            pos_embed_w,
-            model.pos_embed,
-            getattr(model, "num_tokens", 1),
-            model.patch_embed.grid_size,
-        )
-    model.pos_embed.copy_(pos_embed_w)
-    model.norm.weight.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/scale"]))
-    model.norm.bias.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/bias"]))
-    # if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
-    #     model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
-    #     model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
-    # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
-    #     model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
-    #     model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
-    for i, block in enumerate(model.blocks.children()):
-        block_prefix = f"{prefix}Transformer/encoderblock_{i}/"
-        mha_prefix = block_prefix + "MultiHeadDotProductAttention_1/"
-        block.norm1.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/scale"]))
-        block.norm1.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/bias"]))
-        block.attn.qkv.weight.copy_(
-            torch.cat([_n2p(w[f"{mha_prefix}{n}/kernel"], t=False).flatten(1).T for n in ("query", "key", "value")]))
-        block.attn.qkv.bias.copy_(
-            torch.cat([_n2p(w[f"{mha_prefix}{n}/bias"], t=False).reshape(-1) for n in ("query", "key", "value")]))
-        block.attn.proj.weight.copy_(_n2p(w[f"{mha_prefix}out/kernel"]).flatten(1))
-        block.attn.proj.bias.copy_(_n2p(w[f"{mha_prefix}out/bias"]))
-        for r in range(2):
-            getattr(block.mlp, f"fc{r + 1}").weight.copy_(_n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/kernel"]))
-            getattr(block.mlp, f"fc{r + 1}").bias.copy_(_n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/bias"]))
-        block.norm2.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/scale"]))
-        block.norm2.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/bias"]))
-
-
-def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
-    # Rescale the grid of position embeddings when loading from state_dict. Adapted from
-    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
-    print("Resized position embedding: %s to %s" % (posemb.shape, posemb_new.shape))
-    ntok_new = posemb_new.shape[1]
-    if num_tokens:
-        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
-        ntok_new -= num_tokens
-    else:
-        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
-    gs_old = int(math.sqrt(len(posemb_grid)))
-    if not len(gs_new):  # backwards compatibility
-        gs_new = [int(math.sqrt(ntok_new))] * 2
-    assert len(gs_new) >= 2
-    print("Position embedding grid-size from %s to %s" % ([gs_old, gs_old], gs_new))
-    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
-    posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode="bicubic", align_corners=False)
-    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
-    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
-    return posemb  # fixed: the original bare `return` discarded the resized embedding
-
-
-def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
-    # interpolate position embedding
-    embedding_size = pos_embed_checkpoint.shape[-1]
-    num_patches = visual_encoder.patch_embed.num_patches
-    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
-    # height (== width) for the checkpoint position embedding
-    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens)**0.5)
-    # height (== width) for the new position embedding
-    new_size = int(num_patches**0.5)
-
-    if orig_size != new_size:
-        # class_token and dist_token are kept unchanged
-        extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
-        # only the position tokens are interpolated
-        pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
-        pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
-        pos_tokens = torch.nn.functional.interpolate(pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False)
-        pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
-        new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
-        print("reshape position embedding from %d to %d" % (orig_size**2, new_size**2))
-
-        return new_pos_embed
-    else:
-        return pos_embed_checkpoint
spaces/Abhaykoul/Merriam-webster_clone/app.py DELETED
@@ -1,39 +0,0 @@
-import streamlit as st
-import requests
-from bs4 import BeautifulSoup
-
-st.set_page_config(page_title="Word Definition App")
-
-def fetch_definition(word):
-    base_url = f"https://www.merriam-webster.com/dictionary/{word}"
-
-    response = requests.get(base_url)
-
-    if response.status_code == 200:
-        soup = BeautifulSoup(response.text, 'html.parser')
-        definition_span = soup.find("span", class_="dtText")
-
-        if definition_span:
-            full_definition = definition_span.get_text()
-            sentences = full_definition.split('. ')
-            limited_definition = '. '.join(sentences[:3])
-            return limited_definition
-        else:
-            return "Definition not found."
-    else:
-        return "Word not found or unable to retrieve data."
-
-def main():
-    st.title("Word Definition App")
-    word = st.text_input("Enter a word:")
-
-    if word.lower() == 'quit':
-        st.warning("You entered 'quit'. The app will not quit as this is a web application.")
-    else:
-        if st.button("Get Definition"):
-            definition = fetch_definition(word)
-            st.write("Definition:")
-            st.write(definition)
-
-if __name__ == '__main__':
-    main()
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDemo.py DELETED
@@ -1,61 +0,0 @@
-from __future__ import annotations
-
-import time, json, re
-from aiohttp import ClientSession
-from typing import AsyncGenerator
-
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-
-class ChatgptDemo(AsyncGeneratorProvider):
-    url = "https://chat.chatgptdemo.net"
-    supports_gpt_35_turbo = True
-    working = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncGenerator:
-        headers = {
-            "authority": "chat.chatgptdemo.net",
-            "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
-            "origin": "https://chat.chatgptdemo.net",
-            "referer": "https://chat.chatgptdemo.net/",
-            "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
-        }
-        async with ClientSession(headers=headers) as session:
-            async with session.get(f"{cls.url}/", proxy=proxy) as response:
-                response.raise_for_status()
-                response = await response.text()
-            result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', response)
-            if not result:
-                raise RuntimeError("No user id found")
-            user_id = result.group(1)
-            async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
-                response.raise_for_status()
-                chat_id = (await response.json())["id_"]
-            if not chat_id:
-                raise RuntimeError("Could not create new chat")
-            data = {
-                "question": format_prompt(messages),
-                "chat_id": chat_id,
-                "timestamp": int(time.time()*1000),
-            }
-            async with session.post(f"{cls.url}/chat_api_stream", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    if line.startswith(b"data: "):
-                        line = json.loads(line[6:-1])
-                        chunk = line["choices"][0]["delta"].get("content")
-                        if chunk:
-                            yield chunk
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/box/Factory.d.ts DELETED
@@ -1,6 +0,0 @@
-import Box from './Box';
-import Base from '../base/Base';
-
-export default function Factory(
-    config?: Base.IConfig
-): Box;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/buttons/AddChildMethods.js DELETED
@@ -1,85 +0,0 @@
-import Sizer from '../sizer/Sizer.js';
-import IsArray from '../../../plugins/utils/object/IsArray.js';
-
-const SizerAdd = Sizer.prototype.add;
-const SizerAddSpace = Sizer.prototype.addSpace;
-
-var Add = function (gameObject) {
-    var isNormalGameObject = !gameObject.isRexSpace;
-    var proportion = (!isNormalGameObject || this.buttonsExpand) ? 1 : 0;
-
-    if (this.sizerChildren.length === 0) {  // First element
-        if (isNormalGameObject) {
-            // Add space at head
-            var hasHeadSpace = (!this.buttonsExpand) &&
-                ((this.buttonsAlign === 'right') || (this.buttonsAlign === 'center') || (this.buttonsAlign === 'bottom'));
-            if (hasHeadSpace) {
-                SizerAddSpace.call(this);
-            }
-
-            SizerAdd.call(this,
-                gameObject,
-                { proportion: proportion, expand: true }
-            );
-
-            // Add space at tail
-            var hasTailSpace = (!this.buttonsExpand) && (this.buttonsAlign === 'center');
-            if (hasTailSpace) {
-                SizerAddSpace.call(this);
-            }
-            this.hasTailSpace = hasTailSpace;
-
-        } else {  // A space
-            SizerAdd.call(this,
-                gameObject,
-                { proportion: proportion, expand: true }
-            );
-            this.hasTailSpace = false;
-
-        }
-
-    } else {  // Others
-        if (this.hasTailSpace) {
-            var lastIndex = this.sizerChildren.length - 1;
-            SizerAdd.call(this,
-                gameObject,
-                { index: lastIndex, proportion: proportion, expand: true }
-            );
-
-        } else {
-            SizerAdd.call(this,
-                gameObject,
-                { proportion: proportion, expand: true }
-            );
-        }
-
-    }
-
-    // Space or other game object as button
-    if (isNormalGameObject) {
-        this.buttonGroup.add(gameObject);
-    }
-
-    return this;
-};
-
-export default {
-    addButton(gameObject) {
-        if (IsArray(gameObject)) {
-            var gameObjects = gameObject;
-            for (var i = 0, cnt = gameObjects.length; i < cnt; i++) {
-                Add.call(this, gameObjects[i]);
-            }
-        } else {
-            Add.call(this, gameObject);
-        }
-        return this;
-    },
-
-    addButtons(gameObjects) {
-        for (var i = 0, cnt = gameObjects.length; i < cnt; i++) {
-            Add.call(this, gameObjects[i]);
-        }
-        return this;
-    }
-}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/flip/Factory.d.ts DELETED
@@ -1,7 +0,0 @@
-// import * as Phaser from 'phaser';
-import Flip from "./Flip";
-
-export default function (
-    gameObject: Phaser.GameObjects.GameObject,
-    config?: Flip.IConfig
-): Flip;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/shake/Shake.d.ts DELETED
@@ -1,2 +0,0 @@
-import Shake from '../../../plugins/shakeposition';
-export default Shake;
spaces/AiMimicry/sovits-models/modules/commons.py DELETED
@@ -1,188 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-def slice_pitch_segments(x, ids_str, segment_size=4):
-    ret = torch.zeros_like(x[:, :segment_size])
-    for i in range(x.size(0)):
-        idx_str = ids_str[i]
-        idx_end = idx_str + segment_size
-        ret[i] = x[i, idx_str:idx_end]
-    return ret
-
-def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
-    b, d, t = x.size()
-    if x_lengths is None:
-        x_lengths = t
-    ids_str_max = x_lengths - segment_size + 1
-    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-    ret = slice_segments(x, ids_str, segment_size)
-    ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
-    return ret, ret_pitch, ids_str
-
-def init_weights(m, mean=0.0, std=0.01):
-    classname = m.__class__.__name__
-    if classname.find("Conv") != -1:
-        m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
-    return int((kernel_size*dilation - dilation)/2)
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def intersperse(lst, item):
-    result = [item] * (len(lst) * 2 + 1)
-    result[1::2] = lst
-    return result
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
-    """KL(P||Q)"""
-    kl = (logs_q - logs_p) - 0.5
-    kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
-    return kl
-
-
-def rand_gumbel(shape):
-    """Sample from the Gumbel distribution, protect from overflows."""
-    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-    return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
-    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-    return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
-    ret = torch.zeros_like(x[:, :, :segment_size])
-    for i in range(x.size(0)):
-        idx_str = ids_str[i]
-        idx_end = idx_str + segment_size
-        ret[i] = x[i, :, idx_str:idx_end]
-    return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
-    b, d, t = x.size()
-    if x_lengths is None:
-        x_lengths = t
-    ids_str_max = x_lengths - segment_size + 1
-    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-    ret = slice_segments(x, ids_str, segment_size)
-    return ret, ids_str
-
-
-def rand_spec_segments(x, x_lengths=None, segment_size=4):
-    b, d, t = x.size()
-    if x_lengths is None:
-        x_lengths = t
-    ids_str_max = x_lengths - segment_size
-    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-    ret = slice_segments(x, ids_str, segment_size)
-    return ret, ids_str
-
-
-def get_timing_signal_1d(
-        length, channels, min_timescale=1.0, max_timescale=1.0e4):
-    position = torch.arange(length, dtype=torch.float)
-    num_timescales = channels // 2
-    log_timescale_increment = (
-        math.log(float(max_timescale) / float(min_timescale)) /
-        (num_timescales - 1))
-    inv_timescales = min_timescale * torch.exp(
-        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
-    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-    signal = F.pad(signal, [0, 0, 0, channels % 2])
-    signal = signal.view(1, channels, length)
-    return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-    b, channels, length = x.size()
-    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-    return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-    b, channels, length = x.size()
-    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
-    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-    return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-    n_channels_int = n_channels[0]
-    in_act = input_a + input_b
-    t_act = torch.tanh(in_act[:, :n_channels_int, :])
-    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-    acts = t_act * s_act
-    return acts
-
-
-def convert_pad_shape(pad_shape):  # NOTE: duplicates the definition above
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def shift_1d(x):
-    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-    return x
-
-
-def sequence_mask(length, max_length=None):
-    if max_length is None:
-        max_length = length.max()
-    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-    return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
-    """
-    duration: [b, 1, t_x]
-    mask: [b, 1, t_y, t_x]
-    """
-    device = duration.device
-
-    b, _, t_y, t_x = mask.shape
-    cum_duration = torch.cumsum(duration, -1)
-
-    cum_duration_flat = cum_duration.view(b * t_x)
-    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-    path = path.view(b, t_x, t_y)
-    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-    path = path.unsqueeze(1).transpose(2,3) * mask
-    return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
-    if isinstance(parameters, torch.Tensor):
-        parameters = [parameters]
-    parameters = list(filter(lambda p: p.grad is not None, parameters))
-    norm_type = float(norm_type)
-    if clip_value is not None:
-        clip_value = float(clip_value)
-
-    total_norm = 0
-    for p in parameters:
-        param_norm = p.grad.data.norm(norm_type)
-        total_norm += param_norm.item() ** norm_type
-        if clip_value is not None:
-            p.grad.data.clamp_(min=-clip_value, max=clip_value)
-    total_norm = total_norm ** (1. / norm_type)
-    return total_norm
spaces/Aki004/herta-so-vits/vdecoder/nsf_hifigan/env.py DELETED
@@ -1,15 +0,0 @@
-import os
-import shutil
-
-
-class AttrDict(dict):
-    def __init__(self, *args, **kwargs):
-        super(AttrDict, self).__init__(*args, **kwargs)
-        self.__dict__ = self
-
-
-def build_env(config, config_name, path):
-    t_path = os.path.join(path, config_name)
-    if config != t_path:
-        os.makedirs(path, exist_ok=True)
-        shutil.copyfile(config, os.path.join(path, config_name))
spaces/AlexReverie/ImageSonification/README.md DELETED
@@ -1,12 +0,0 @@
----
-title: ImageSonification
-emoji: 🐨
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Alpaca233/SadTalker/src/face3d/models/base_model.py DELETED
@@ -1,316 +0,0 @@
-"""This script defines the base network model for Deep3DFaceRecon_pytorch
-"""
-
-import os
-import numpy as np
-import torch
-from collections import OrderedDict
-from abc import ABC, abstractmethod
-from . import networks
-
-
-class BaseModel(ABC):
-    """This class is an abstract base class (ABC) for models.
-    To create a subclass, you need to implement the following five functions:
-        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-        -- <set_input>: unpack data from dataset and apply preprocessing.
-        -- <forward>: produce intermediate results.
-        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
-        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
-    """
-
-    def __init__(self, opt):
-        """Initialize the BaseModel class.
-
-        Parameters:
-            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
-
-        When creating your custom class, you need to implement your own initialization.
-        In this function, you should first call <BaseModel.__init__(self, opt)>
-        Then, you need to define four lists:
-            -- self.loss_names (str list): specify the training losses that you want to plot and save.
-            -- self.model_names (str list): define networks used in our training.
-            -- self.visual_names (str list): specify the images that you want to display and save.
-            -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
-        """
-        self.opt = opt
-        self.isTrain = False
-        self.device = torch.device('cpu')
-        self.save_dir = " "  # os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
-        self.loss_names = []
-        self.model_names = []
-        self.visual_names = []
-        self.parallel_names = []
-        self.optimizers = []
-        self.image_paths = []
-        self.metric = 0  # used for learning rate policy 'plateau'
-
-    @staticmethod
-    def dict_grad_hook_factory(add_func=lambda x: x):
-        saved_dict = dict()
-
-        def hook_gen(name):
-            def grad_hook(grad):
-                saved_vals = add_func(grad)
-                saved_dict[name] = saved_vals
-            return grad_hook
-        return hook_gen, saved_dict
-
-    @staticmethod
-    def modify_commandline_options(parser, is_train):
-        """Add new model-specific options, and rewrite default values for existing options.
-
-        Parameters:
-            parser -- original option parser
-            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
-
-        Returns:
-            the modified parser.
-        """
-        return parser
-
-    @abstractmethod
-    def set_input(self, input):
-        """Unpack input data from the dataloader and perform necessary pre-processing steps.
-
-        Parameters:
-            input (dict): includes the data itself and its metadata information.
-        """
-        pass
-
-    @abstractmethod
-    def forward(self):
-        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
-        pass
-
-    @abstractmethod
-    def optimize_parameters(self):
-        """Calculate losses, gradients, and update network weights; called in every training iteration"""
-        pass
-
-    def setup(self, opt):
-        """Load and print networks; create schedulers
-
-        Parameters:
-            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
-        """
-        if self.isTrain:
-            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
-
-        if not self.isTrain or opt.continue_train:
-            load_suffix = opt.epoch
-            self.load_networks(load_suffix)
-
-        # self.print_networks(opt.verbose)
-
-    def parallelize(self, convert_sync_batchnorm=True):
-        if not self.opt.use_ddp:
-            for name in self.parallel_names:
-                if isinstance(name, str):
-                    module = getattr(self, name)
-                    setattr(self, name, module.to(self.device))
-        else:
-            for name in self.model_names:
-                if isinstance(name, str):
-                    module = getattr(self, name)
-                    if convert_sync_batchnorm:
-                        module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)
-                    setattr(self, name, torch.nn.parallel.DistributedDataParallel(module.to(self.device),
-                                                                                  device_ids=[self.device.index],
-                                                                                  find_unused_parameters=True, broadcast_buffers=True))
-
-            # DistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient.
-            for name in self.parallel_names:
-                if isinstance(name, str) and name not in self.model_names:
-                    module = getattr(self, name)
-                    setattr(self, name, module.to(self.device))
-
-        # put state_dict of optimizer to gpu device
-        if self.opt.phase != 'test':
-            if self.opt.continue_train:
-                for optim in self.optimizers:
-                    for state in optim.state.values():
-                        for k, v in state.items():
-                            if isinstance(v, torch.Tensor):
-                                state[k] = v.to(self.device)
-
-    def data_dependent_initialize(self, data):
-        pass
-
-    def train(self):
-        """Put models in train mode"""
-        for name in self.model_names:
-            if isinstance(name, str):
-                net = getattr(self, name)
-                net.train()
-
-    def eval(self):
-        """Put models in eval mode"""
-        for name in self.model_names:
-            if isinstance(name, str):
-                net = getattr(self, name)
-                net.eval()
-
-    def test(self):
-        """Forward function used in test time.
-
-        This function wraps <forward> in no_grad() so we don't save intermediate steps for backprop.
-        It also calls <compute_visuals> to produce additional visualization results.
-        """
-        with torch.no_grad():
-            self.forward()
-            self.compute_visuals()
-
-    def compute_visuals(self):
-        """Calculate additional output images for visdom and HTML visualization"""
-        pass
-
-    def get_image_paths(self, name='A'):
-        """Return image paths that are used to load current data"""
-        return self.image_paths if name == 'A' else self.image_paths_B
-
-    def update_learning_rate(self):
-        """Update learning rates for all the networks; called at the end of every epoch"""
-        for scheduler in self.schedulers:
-            if self.opt.lr_policy == 'plateau':
-                scheduler.step(self.metric)
-            else:
-                scheduler.step()
-
-        lr = self.optimizers[0].param_groups[0]['lr']
-        print('learning rate = %.7f' % lr)
-
-    def get_current_visuals(self):
-        """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
-        visual_ret = OrderedDict()
-        for name in self.visual_names:
-            if isinstance(name, str):
-                visual_ret[name] = getattr(self, name)[:, :3, ...]
-        return visual_ret
-
-    def get_current_losses(self):
-        """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
-        errors_ret = OrderedDict()
-        for name in self.loss_names:
-            if isinstance(name, str):
-                errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
-        return errors_ret
-
-    def save_networks(self, epoch):
-        """Save all the networks to the disk.
-
-        Parameters:
-            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
-        """
-        if not os.path.isdir(self.save_dir):
-            os.makedirs(self.save_dir)
-
-        save_filename = 'epoch_%s.pth' % (epoch)
-        save_path = os.path.join(self.save_dir, save_filename)
-
-        save_dict = {}
-        for name in self.model_names:
-            if isinstance(name, str):
-                net = getattr(self, name)
-                if isinstance(net, torch.nn.DataParallel) or isinstance(net,
-                                                                        torch.nn.parallel.DistributedDataParallel):
-                    net = net.module
-                save_dict[name] = net.state_dict()
-
-        for i, optim in enumerate(self.optimizers):
-            save_dict['opt_%02d' % i] = optim.state_dict()
-
-        for i, sched in enumerate(self.schedulers):
-            save_dict['sched_%02d' % i] = sched.state_dict()
-
-        torch.save(save_dict, save_path)
-
-    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
-        """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
-        key = keys[i]
-        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
-            if module.__class__.__name__.startswith('InstanceNorm') and \
-                    (key == 'running_mean' or key == 'running_var'):
-                if getattr(module, key) is None:
-                    state_dict.pop('.'.join(keys))
-            if module.__class__.__name__.startswith('InstanceNorm') and \
-                    (key == 'num_batches_tracked'):
-                state_dict.pop('.'.join(keys))
-        else:
-            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
-
-    def load_networks(self, epoch):
-        """Load all the networks from the disk.
-
-        Parameters:
-            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
-        """
-        if self.opt.isTrain and self.opt.pretrained_name is not None:
-            load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
-        else:
-            load_dir = self.save_dir
-        load_filename = 'epoch_%s.pth' % (epoch)
-        load_path = os.path.join(load_dir, load_filename)
-        state_dict = torch.load(load_path, map_location=self.device)
-        print('loading the model from %s' % load_path)
-
-        for name in self.model_names:
-            if isinstance(name, str):
-                net = getattr(self, name)
-                if isinstance(net, torch.nn.DataParallel):
-                    net = net.module
-                net.load_state_dict(state_dict[name])
-
-        if self.opt.phase != 'test':
-            if self.opt.continue_train:
-                print('loading the optim from %s' % load_path)
-                for i, optim in enumerate(self.optimizers):
-                    optim.load_state_dict(state_dict['opt_%02d' % i])
-
-                try:
-                    print('loading the sched from %s' % load_path)
-                    for i, sched in enumerate(self.schedulers):
-                        sched.load_state_dict(state_dict['sched_%02d' % i])
-                except Exception:  # fixed: avoid a bare `except:` clause
-                    print('Failed to load schedulers, set schedulers according to epoch count manually')
-                    for i, sched in enumerate(self.schedulers):
-                        sched.last_epoch = self.opt.epoch_count - 1
-
-    def print_networks(self, verbose):
-        """Print the total number of parameters in the network and (if verbose) network architecture
-
-        Parameters:
-            verbose (bool) -- if verbose: print the network architecture
-        """
-        print('---------- Networks initialized -------------')
-        for name in self.model_names:
-            if isinstance(name, str):
-                net = getattr(self, name)
-                num_params = 0
-                for param in net.parameters():
-                    num_params += param.numel()
-                if verbose:
-                    print(net)
-                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
-        print('-----------------------------------------------')
-
-    def set_requires_grad(self, nets, requires_grad=False):
-        """Set requires_grad=False for all the networks to avoid unnecessary computations
-        Parameters:
-            nets (network list)   -- a list of networks
-            requires_grad (bool)  -- whether the networks require gradients or not
-        """
-        if not isinstance(nets, list):
-            nets = [nets]
-        for net in nets:
-            if net is not None:
-                for param in net.parameters():
-                    param.requires_grad = requires_grad
-
-    def generate_visuals_for_evaluation(self, data, mode):
-        return {}
spaces/Ame42/rwms/datastore.py DELETED
@@ -1,252 +0,0 @@
-# 'dataset' holds the input data for this script
-import re
-import math
-import pandas
-from local_utils import date_time_col, time_col, man_col, dur_col, round_to_n, to_sec
-import json
-import os
-import re
-import pandas
-from local_utils import date_time_col, dur_col, date_col, s1, s2, l1, l2, well_key, flow_key
-from local_utils import restructure, try_key, column_matcher, file_matcher, file_matcher2, split_join as spj
-id_col = "index"
-
-
-def split_join(dt, info):
-    joined = []
-    for i in info:
-        print(f'\n\nNow working on {i["data-column"]} column\n')
-        data = dt.drop(axis=1, columns=i["non-columns"])
-        data.rename(columns={i["data-column"]: man_col}, inplace=True)
-        data.insert(2, "Well index", [i["well-index"] for x in range(data.shape[0])], True)
-        print(f"{data.shape[0]} rows before drop and merge")
-        data.drop_duplicates(inplace=True, subset=[time_col])
-        i["dataset"].drop_duplicates(inplace=True, subset=[time_col])
-        print(data.head())
-        print(i["dataset"].head())
-        data = data.merge(i["dataset"], how='left', on=[time_col, dur_col])
-        print(data.head())
-        print(f"{data.shape[0]} rows after drop and merge")
-        joined.append(data)
-
-    return pandas.concat(joined, ignore_index=True)
-
-
-def get_22_data():
-    dataset = pandas.read_csv("input/flowstation.csv")
-    dataset_1S = pandas.read_csv("input/1S.csv")
-    dataset_1L = pandas.read_csv("input/1L.csv")
-    dataset_2S = pandas.read_csv("input/2S.csv")
-    dataset_2L = pandas.read_csv("input/2L.csv")
-
-    print(dataset.head())
-
-    for dat in [dataset, dataset_1S, dataset_1L, dataset_2L, dataset_2S]:
-        count = 0
-        duration = []
-        times = []
-        dat.dropna(axis=0, how="any", inplace=True)
-        dat.reset_index(inplace=True)
-        print(".", end="\t")
-
-        for datetime in dat[date_time_col]:
-            date_time = re.sub("\.0(?=\\s)", "", datetime)
-            datetime_array = date_time.split()
-            date = datetime_array[0].split("/")
-
-            time_array = datetime_array[1].split(":")
-
-            if datetime_array[2] == "PM" and time_array[0] != "12":
-                hour = int(time_array[0]) + 12
-            elif datetime_array[2] == "AM" and time_array[0] == "12":
-                hour = int(time_array[0]) - 12
-            else:
-                hour = time_array[0]
-
-            minutes = time_array[1]
-            sec = round_to_n(int(time_array[2]), 1)
-
-            if sec == 60:
-                sec = "00"
-                minutes = int(minutes) + 1
-
-            if minutes == 60:
-                minutes = "00"
-                hour = int(hour) + 1
-
-            if hour == 24:
-                hour = "00"
-                date[1] = int(date[1]) + 1
-
-            duration.append(to_sec(hour, minutes, sec))
-            times.append(f"{hour}:{minutes}:{sec}")
-            date_time = f"{date[1]}/{date[0]}/{date[2]} {datetime_array[1]} {datetime_array[2]}"
-
-            dat.loc[count, date_time_col] = date_time
-            count += 1
-
-        dat.insert(1, dur_col, duration, True)
-        dat.insert(2, time_col, times, True)
-
-        dat.drop(axis=1, columns=["#", date_time_col], inplace=True, errors="ignore")
-
-    info_1S = {
-        "non-columns": ["index", "NW1L, PSI (LGR S/N: 20705686)", "NW2L, PSI (LGR S/N: 20705686)",
-                        "NW2S, PSI (LGR S/N: 20705686)"],
-        "data-column": 'NW1S, PSI (LGR S/N: 20705686)',
-        "well-index": '1S',
-        "dataset": dataset_1S
-    }
-
-    info_1L = {
-        "non-columns": ["index", "NW1S, PSI (LGR S/N: 20705686)", "NW2L, PSI (LGR S/N: 20705686)",
-                        "NW2S, PSI (LGR S/N: 20705686)"],
-        "data-column": 'NW1L, PSI (LGR S/N: 20705686)',
-        "well-index": '1L',
-        "dataset": dataset_1L
-    }
-
-    info_2S = {
-        "non-columns": ["index", "NW1S, PSI (LGR S/N: 20705686)", "NW1L, PSI (LGR S/N: 20705686)",
-                        "NW2L, PSI (LGR S/N: 20705686)"],
-        "data-column": 'NW2S, PSI (LGR S/N: 20705686)',
-        "well-index": '2S',
-        "dataset": dataset_2S
-    }
-
-    info_2L = {
-        "non-columns": ["index", "NW1S, PSI (LGR S/N: 20705686)", "NW1L, PSI (LGR S/N: 20705686)",
-                        "NW2S, PSI (LGR S/N: 20705686)"],
-        "data-column": 'NW2L, PSI (LGR S/N: 20705686)',
-        "well-index": '2L',
-        "dataset": dataset_2L
-    }
-
-    dataset = split_join(dataset, [info_1S, info_1L, info_2S, info_2L])
-    dataset.drop(axis=1, columns=[id_col], inplace=True, errors="ignore")
-    dataset.insert(0, id_col, [x for x in range(dataset.shape[0])], True)
-
-    return dataset.drop(axis=1, columns="level_0")
-
-
-def get_all_data():
-    # get the list of data files and load each into a dataframe, put the dataframes in a list called files.
-    alldata = pandas.read_csv("input/files.csv")
-    files = []
-    i = 0
-    for filepath in alldata["File Path"]:
-        i += 1
-        try:
-            files.append((pandas.read_csv(filepath)))
-        except UnicodeDecodeError:
-            os.write(2, bytearray(f"Failed {i} - {filepath}\n", encoding="UTF-8", errors="e"))
-            raise
-    print(f"{len(files)} of {alldata.shape[0]} loaded")
-    # for each dataframe in files, standardize the column names and remove columns with too few values
-    truth = 0
-    total = 0
-    cut_off = 0.4
-    temp = []
-    for file, name in zip(files, alldata["Name"]):
-        for col in file.columns:
-            total += 1
-            result = column_matcher(col)
-            if not result:
-                file.drop(axis=1, columns=[col], inplace=True)
-            else:
-                file.rename(columns={col: result}, inplace=True)
-                if file[result].isna().sum() / file.shape[0] > cut_off:
-                    truth += 1
-                    file.drop(axis=1, columns=[result], inplace=True)
-        file.dropna(axis=0, how="any", inplace=True)
-        temp.append(file)
-    files = temp
-    print(f"{truth}/{total} columns dropped due to insufficient data at {cut_off * 100:.0f}% cut-off")
-    temp = dict()
-    # restructure data (extract and correct time, remove unnecessary column and add new once)
-    # remove null rows, drop first row in case of gibberish headers and group dataframes by day in dict
-    for index, dat, name, path in zip(range(len(files)), files, alldata["Name"], alldata["File Path"]):
-        try:
-            count = 0
-            duration = []
-            times = []
-            dates = []
-            dat.dropna(axis=0, how="any", inplace=True)
-            dat.drop(0, axis=0, inplace=True, errors="ignore")
-            dat.reset_index(inplace=True)
-            print(f"•{index + 1:^4}•", end=" ")
-            dat = restructure(dat, count, duration, times, dates)
-
-            dat.drop(axis=1, columns=date_time_col, inplace=True, errors="ignore")
-            print(f"{str(sorted([x for x in dat.columns])):<123} •")
-            # dat.to_csv(f"output/temp/{name}", index=False)
-
-            if file_matcher(name):
-                # flowstation file
-                key = re.split("(?<=\\d{2})(-|_|\\s)(?=flow.*)", string=name.lower(), maxsplit=2)[0][:8]
-                try_key(temp, key)
-
-                temp[key][flow_key] = dat
-
-            else:
-                # wellhead file
-                key = re.split("(?<=\\d{2})(\\s-|-|_|\\s)?(?=(t|\\d[ls]).*)", string=name.lower(), maxsplit=2)[0][:8]
-                try_key(temp, key)
-
-                try:
-                    temp[key][well_key]
-                except KeyError:
-                    temp[key][well_key] = []
-
-                temp[key][well_key].append((file_matcher2(name.lower()), dat))
-
-        except KeyError:
-            print(f"\n\n{path}", flush=True)
-            print("Columns:", dat.columns, end="\n\n", flush=True)
-
-    return temp
-
-
-def get_conversion_factors():
-    # get conversion_factors.csv from input in a dataframe
-    return pandas.read_csv("input/conversion_factors.csv", encoding="UTF-8")
-    # return dataframe
-    pass
-
-
-def offset_wells(agg_data, how=None):
-    # Prepare aggregated data for each well from all days
-    if how is None:
-        how = [0, 0, 0, 0]
-
-    data_1S = []
-    data_2S = []
-    data_1L = []
-    data_2L = []
-    for key in agg_data.keys():
-        try:
-            temp = spj(agg_data[key][flow_key], agg_data[key][well_key], how)  # returns list of (well id, data) tuples
-            for t in temp:
-
-                if t[0] == s1:
-                    data_1S.append(t[1])
-                elif t[0] == s2:
-                    data_2S.append(t[1])
-                elif t[0] == l1:
-                    data_1L.append(t[1])
-                elif t[0] == l2:
-                    data_2L.append(t[1])
-        except KeyError:
-            pass
-    data_1L = pandas.concat(data_1L, ignore_index=True)
-    data_2L = pandas.concat(data_2L, ignore_index=True)
-    data_1S = pandas.concat(data_1S, ignore_index=True)
-    data_2S = pandas.concat(data_2S, ignore_index=True)
-
-    data_s = []
-    for name, data in zip([l1, l2, s1, s2], [data_1L, data_2L, data_1S, data_2S]):
-        data.drop_duplicates(subset=[dur_col, date_col], inplace=True)
-        data_s.append(data)
-        # data.to_csv(f"output/{name}.csv", index=False)
-
-    return pandas.concat(data_s, ignore_index=True)
spaces/Amrrs/DragGan-Inversion/PTI/evaluation/experiment_setting_creator.py DELETED
@@ -1,43 +0,0 @@
1
- import glob
2
- import os
3
- from configs import global_config, paths_config, hyperparameters
4
- from scripts.latent_creators.sg2_plus_latent_creator import SG2PlusLatentCreator
5
- from scripts.latent_creators.e4e_latent_creator import E4ELatentCreator
6
- from scripts.run_pti import run_PTI
7
- import pickle
8
- import torch
9
- from utils.models_utils import toogle_grad, load_old_G
10
-
11
-
12
- class ExperimentRunner:
13
-
14
- def __init__(self, run_id=''):
15
- self.images_paths = glob.glob(f'{paths_config.input_data_path}/*')
16
- self.target_paths = glob.glob(f'{paths_config.input_data_path}/*')
17
- self.run_id = run_id
18
- self.sampled_ws = None
19
-
20
- self.old_G = load_old_G()
21
-
22
- toogle_grad(self.old_G, False)
23
-
24
- def run_experiment(self, run_pt, create_other_latents, use_multi_id_training, use_wandb=False):
25
- if run_pt:
26
- self.run_id = run_PTI(self.run_id, use_wandb=use_wandb, use_multi_id_training=use_multi_id_training)
27
- if create_other_latents:
28
- sg2_plus_latent_creator = SG2PlusLatentCreator(use_wandb=use_wandb)
29
- sg2_plus_latent_creator.create_latents()
30
- e4e_latent_creator = E4ELatentCreator(use_wandb=use_wandb)
31
- e4e_latent_creator.create_latents()
32
-
33
- torch.cuda.empty_cache()
34
-
35
- return self.run_id
36
-
37
-
38
- if __name__ == '__main__':
39
- os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
40
- os.environ['CUDA_VISIBLE_DEVICES'] = global_config.cuda_visible_devices
41
-
42
- runner = ExperimentRunner()
43
- runner.run_experiment(True, False, False)
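The positional booleans in the `__main__` block above are easy to misread; the same call with keyword arguments (matching the `run_experiment` signature shown) would read like this. The import path is an assumption based on this file's name:

```python
from experiment_setting_creator import ExperimentRunner  # assuming this file's module name

runner = ExperimentRunner()
run_id = runner.run_experiment(
    run_pt=True,                  # run the PTI inversion step
    create_other_latents=False,   # skip the SG2Plus / e4e latent creators
    use_multi_id_training=False,  # single-identity tuning
)
print(run_id)
```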
spaces/Amrrs/DragGan-Inversion/stylegan_human/PP_HumanSeg/export_model/download_export_model.py DELETED
@@ -1,44 +0,0 @@
1
- # coding: utf8
2
- # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- from paddleseg.utils.download import download_file_and_uncompress
17
- import sys
18
- import os
19
-
20
- LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
21
- TEST_PATH = os.path.join(LOCAL_PATH, "../../../", "test")
22
- sys.path.append(TEST_PATH)
23
-
24
-
25
- model_urls = {
26
- "pphumanseg_lite_portrait_398x224_with_softmax":
27
- "https://paddleseg.bj.bcebos.com/dygraph/ppseg/ppseg_lite_portrait_398x224_with_softmax.tar.gz",
28
- "deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax":
29
- "https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax.zip",
30
- "fcn_hrnetw18_small_v1_humanseg_192x192_with_softmax":
31
- "https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/fcn_hrnetw18_small_v1_humanseg_192x192_with_softmax.zip",
32
- "pphumanseg_lite_generic_humanseg_192x192_with_softmax":
33
- "https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/pphumanseg_lite_generic_192x192_with_softmax.zip",
34
- }
35
-
36
- if __name__ == "__main__":
37
- for model_name, url in model_urls.items():
38
- download_file_and_uncompress(
39
- url=url,
40
- savepath=LOCAL_PATH,
41
- extrapath=LOCAL_PATH,
42
- extraname=model_name)
43
-
44
- print("Export model download success!")
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/conv2d_gradfix.py DELETED
@@ -1,196 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
4
- #
5
- # NVIDIA CORPORATION and its licensors retain all intellectual property
6
- # and proprietary rights in and to this software, related documentation
7
- # and any modifications thereto. Any use, reproduction, disclosure or
8
- # distribution of this software and related documentation without an express
9
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
10
-
11
- """Custom replacement for `torch.nn.functional.conv2d` that supports
12
- arbitrarily high order gradients with zero performance penalty."""
13
-
14
- import warnings
15
- import contextlib
16
- import torch
17
-
18
- # pylint: disable=redefined-builtin
19
- # pylint: disable=arguments-differ
20
- # pylint: disable=protected-access
21
-
22
- # ----------------------------------------------------------------------------
23
-
24
- # Enable the custom op by setting this to true.
25
- enabled = False
26
- # Forcefully disable computation of gradients with respect to the weights.
27
- weight_gradients_disabled = False
28
-
29
-
30
- @contextlib.contextmanager
31
- def no_weight_gradients():
32
- global weight_gradients_disabled
33
- old = weight_gradients_disabled
34
- weight_gradients_disabled = True
35
- yield
36
- weight_gradients_disabled = old
37
-
38
- # ----------------------------------------------------------------------------
39
-
40
-
41
- def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
42
- if _should_use_custom_op(input):
43
- return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias)
44
- return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
45
-
46
-
47
- def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
48
- if _should_use_custom_op(input):
49
- return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias)
50
- return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
51
-
52
- # ----------------------------------------------------------------------------
53
-
54
-
55
- def _should_use_custom_op(input):
56
- assert isinstance(input, torch.Tensor)
57
- if (not enabled) or (not torch.backends.cudnn.enabled):
58
- return False
59
- if input.device.type != 'cuda':
60
- return False
61
- if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']):
62
- return True
63
- warnings.warn(
64
- f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
65
- return False
66
-
67
-
68
- def _tuple_of_ints(xs, ndim):
69
- xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
70
- assert len(xs) == ndim
71
- assert all(isinstance(x, int) for x in xs)
72
- return xs
73
-
74
- # ----------------------------------------------------------------------------
75
-
76
-
77
- _conv2d_gradfix_cache = dict()
78
-
79
-
80
- def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
81
- # Parse arguments.
82
- ndim = 2
83
- weight_shape = tuple(weight_shape)
84
- stride = _tuple_of_ints(stride, ndim)
85
- padding = _tuple_of_ints(padding, ndim)
86
- output_padding = _tuple_of_ints(output_padding, ndim)
87
- dilation = _tuple_of_ints(dilation, ndim)
88
-
89
- # Lookup from cache.
90
- key = (transpose, weight_shape, stride, padding,
91
- output_padding, dilation, groups)
92
- if key in _conv2d_gradfix_cache:
93
- return _conv2d_gradfix_cache[key]
94
-
95
- # Validate arguments.
96
- assert groups >= 1
97
- assert len(weight_shape) == ndim + 2
98
- assert all(stride[i] >= 1 for i in range(ndim))
99
- assert all(padding[i] >= 0 for i in range(ndim))
100
- assert all(dilation[i] >= 0 for i in range(ndim))
101
- if not transpose:
102
- assert all(output_padding[i] == 0 for i in range(ndim))
103
- else: # transpose
104
- assert all(0 <= output_padding[i] < max(
105
- stride[i], dilation[i]) for i in range(ndim))
106
-
107
- # Helpers.
108
- common_kwargs = dict(stride=stride, padding=padding,
109
- dilation=dilation, groups=groups)
110
-
111
- def calc_output_padding(input_shape, output_shape):
112
- if transpose:
113
- return [0, 0]
114
- return [
115
- input_shape[i + 2]
116
- - (output_shape[i + 2] - 1) * stride[i]
117
- - (1 - 2 * padding[i])
118
- - dilation[i] * (weight_shape[i + 2] - 1)
119
- for i in range(ndim)
120
- ]
121
-
122
- # Forward & backward.
123
- class Conv2d(torch.autograd.Function):
124
- @staticmethod
125
- def forward(ctx, input, weight, bias):
126
- assert weight.shape == weight_shape
127
- if not transpose:
128
- output = torch.nn.functional.conv2d(
129
- input=input, weight=weight, bias=bias, **common_kwargs)
130
- else: # transpose
131
- output = torch.nn.functional.conv_transpose2d(
132
- input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
133
- ctx.save_for_backward(input, weight)
134
- return output
135
-
136
- @staticmethod
137
- def backward(ctx, grad_output):
138
- input, weight = ctx.saved_tensors
139
- grad_input = None
140
- grad_weight = None
141
- grad_bias = None
142
-
143
- if ctx.needs_input_grad[0]:
144
- p = calc_output_padding(
145
- input_shape=input.shape, output_shape=grad_output.shape)
146
- grad_input = _conv2d_gradfix(transpose=(
147
- not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
148
- assert grad_input.shape == input.shape
149
-
150
- if ctx.needs_input_grad[1] and not weight_gradients_disabled:
151
- grad_weight = Conv2dGradWeight.apply(grad_output, input)
152
- assert grad_weight.shape == weight_shape
153
-
154
- if ctx.needs_input_grad[2]:
155
- grad_bias = grad_output.sum([0, 2, 3])
156
-
157
- return grad_input, grad_weight, grad_bias
158
-
159
- # Gradient with respect to the weights.
160
- class Conv2dGradWeight(torch.autograd.Function):
161
- @staticmethod
162
- def forward(ctx, grad_output, input):
163
- op = torch._C._jit_get_operation(
164
- 'aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight')
165
- flags = [torch.backends.cudnn.benchmark,
166
- torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
167
- grad_weight = op(weight_shape, grad_output, input,
168
- padding, stride, dilation, groups, *flags)
169
- assert grad_weight.shape == weight_shape
170
- ctx.save_for_backward(grad_output, input)
171
- return grad_weight
172
-
173
- @staticmethod
174
- def backward(ctx, grad2_grad_weight):
175
- grad_output, input = ctx.saved_tensors
176
- grad2_grad_output = None
177
- grad2_input = None
178
-
179
- if ctx.needs_input_grad[0]:
180
- grad2_grad_output = Conv2d.apply(
181
- input, grad2_grad_weight, None)
182
- assert grad2_grad_output.shape == grad_output.shape
183
-
184
- if ctx.needs_input_grad[1]:
185
- p = calc_output_padding(
186
- input_shape=input.shape, output_shape=grad_output.shape)
187
- grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape,
188
- output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None)
189
- assert grad2_input.shape == input.shape
190
-
191
- return grad2_grad_output, grad2_input
192
-
193
- _conv2d_gradfix_cache[key] = Conv2d
194
- return Conv2d
195
-
196
- # ----------------------------------------------------------------------------
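A minimal usage sketch for the module above: the custom op is opt-in via the module-level `enabled` flag, and weight gradients can be suppressed with the `no_weight_gradients()` context manager, both defined in the file. It needs a CUDA device, and the import path is the one this repo uses elsewhere; the tensor shapes are illustrative:

```python
import torch
from torch_utils.ops import conv2d_gradfix  # import path used elsewhere in this repo

conv2d_gradfix.enabled = True  # opt in; unsupported setups fall back to F.conv2d

x = torch.randn(1, 3, 64, 64, device="cuda", requires_grad=True)
w = torch.randn(8, 3, 3, 3, device="cuda", requires_grad=True)

# Regularizer-style pass: gradients reach the input but not the weights.
with conv2d_gradfix.no_weight_gradients():
    y = conv2d_gradfix.conv2d(x, w, padding=1)
y.sum().backward()

print(x.grad is not None)  # True
print(w.grad)              # None while the custom op path is active
```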
spaces/Amrrs/QR-code-AI-art-generator/README.md DELETED
@@ -1,15 +0,0 @@
1
- ---
2
- title: QR Code AI Art Generator
3
- emoji: 📱🔲
4
- colorFrom: MediumSeaGreen
5
- colorTo: CornflowerBlue
6
- sdk: gradio
7
- sdk_version: 3.35.2
8
- app_file: app.py
9
- pinned: false
10
- suggested_hardware: t4-medium
11
- startup_duration_timeout: 1h
12
- duplicated_from: huggingface-projects/QR-code-AI-art-generator
13
- ---
14
-
15
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py DELETED
@@ -1,43 +0,0 @@
1
- _base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
2
- teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
3
- model = dict(
4
- pretrained='torchvision://resnet101',
5
- teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py',
6
- teacher_ckpt=teacher_ckpt,
7
- backbone=dict(
8
- type='ResNet',
9
- depth=101,
10
- num_stages=4,
11
- out_indices=(0, 1, 2, 3),
12
- frozen_stages=1,
13
- norm_cfg=dict(type='BN', requires_grad=True),
14
- norm_eval=True,
15
- style='pytorch'),
16
- neck=dict(
17
- type='FPN',
18
- in_channels=[256, 512, 1024, 2048],
19
- out_channels=256,
20
- start_level=1,
21
- add_extra_convs='on_output',
22
- num_outs=5))
23
-
24
- lr_config = dict(step=[16, 22])
25
- runner = dict(type='EpochBasedRunner', max_epochs=24)
26
- # multi-scale training
27
- img_norm_cfg = dict(
28
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
29
- train_pipeline = [
30
- dict(type='LoadImageFromFile'),
31
- dict(type='LoadAnnotations', with_bbox=True),
32
- dict(
33
- type='Resize',
34
- img_scale=[(1333, 480), (1333, 800)],
35
- multiscale_mode='range',
36
- keep_ratio=True),
37
- dict(type='RandomFlip', flip_ratio=0.5),
38
- dict(type='Normalize', **img_norm_cfg),
39
- dict(type='Pad', size_divisor=32),
40
- dict(type='DefaultFormatBundle'),
41
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
42
- ]
43
- data = dict(train=dict(pipeline=train_pipeline))
spaces/Andy1621/uniformer_image_detection/configs/yolo/yolov3_d53_320_273e_coco.py DELETED
@@ -1,42 +0,0 @@
1
- _base_ = './yolov3_d53_mstrain-608_273e_coco.py'
2
- # dataset settings
3
- img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
4
- train_pipeline = [
5
- dict(type='LoadImageFromFile', to_float32=True),
6
- dict(type='LoadAnnotations', with_bbox=True),
7
- dict(type='PhotoMetricDistortion'),
8
- dict(
9
- type='Expand',
10
- mean=img_norm_cfg['mean'],
11
- to_rgb=img_norm_cfg['to_rgb'],
12
- ratio_range=(1, 2)),
13
- dict(
14
- type='MinIoURandomCrop',
15
- min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
16
- min_crop_size=0.3),
17
- dict(type='Resize', img_scale=(320, 320), keep_ratio=True),
18
- dict(type='RandomFlip', flip_ratio=0.5),
19
- dict(type='Normalize', **img_norm_cfg),
20
- dict(type='Pad', size_divisor=32),
21
- dict(type='DefaultFormatBundle'),
22
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
23
- ]
24
- test_pipeline = [
25
- dict(type='LoadImageFromFile'),
26
- dict(
27
- type='MultiScaleFlipAug',
28
- img_scale=(320, 320),
29
- flip=False,
30
- transforms=[
31
- dict(type='Resize', keep_ratio=True),
32
- dict(type='RandomFlip'),
33
- dict(type='Normalize', **img_norm_cfg),
34
- dict(type='Pad', size_divisor=32),
35
- dict(type='ImageToTensor', keys=['img']),
36
- dict(type='Collect', keys=['img'])
37
- ])
38
- ]
39
- data = dict(
40
- train=dict(pipeline=train_pipeline),
41
- val=dict(pipeline=test_pipeline),
42
- test=dict(pipeline=test_pipeline))
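Both mmdetection config files above are deltas on top of `_base_` files. To inspect what a merged config actually contains, mmcv can compose it; a quick sketch, assuming it is run from an mmdetection checkout:

```python
from mmcv import Config

# fromfile resolves the `_base_` chain, then applies this file's overrides.
cfg = Config.fromfile("configs/yolo/yolov3_d53_320_273e_coco.py")
print(cfg.pretty_text)  # the fully merged configuration
```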
spaces/Anonymous-sub/Rerender/ControlNet/docs/low_vram.md DELETED
@@ -1,15 +0,0 @@
1
- # Enable Low VRAM Mode
2
-
3
- If you are using an 8GB GPU card (or want a larger batch size), open "config.py" and set
4
-
5
- ```python
6
- save_memory = True
7
- ```
8
-
9
- This feature is still being tested; not all graphics cards are guaranteed to work.
10
-
11
- Even so, it works well in practice: with it enabled, I can diffuse at a batch size of 12.
12
-
13
- (prompt "man")
14
-
15
- ![p](../github_page/ram12.jpg)
spaces/AshutoshPattanayak/LangchainDemo/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: LangchainDemo
3
- emoji: 🐨
4
- colorFrom: gray
5
- colorTo: red
6
- sdk: streamlit
7
- sdk_version: 1.28.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Aveygo/AstroSleuth/main.py DELETED
@@ -1,154 +0,0 @@
1
- import math, numpy as np, requests, os, time, warnings, json
2
- from PIL import Image
3
- from importlib import import_module
4
- import torch
5
-
6
- class AstroSleuth():
7
- def __init__(self, tile_size=256, tile_pad=16, wrk_dir="models/", model="astrosleuthv2", force_cpu=False, on_download=None, off_download=None):
8
- # Device selection
9
- self.device = "cpu" if force_cpu else ("cuda" if torch.cuda.is_available() else "cpu")
10
- print(self.device)
11
-
12
- # Check if model name is known
13
- model_src:dict = json.load(open("models.json"))["data"]
14
- assert model in model_src, f"Model {model} not found! Available models: {list(model_src.keys())}"
15
-
16
- # Load model module
17
- module_path = model_src[model]["src"]["module"]
18
-
19
- self.model_module:torch.nn.Module = getattr(
20
- import_module(module_path.split("/")[0]),
21
- module_path.split("/")[1]
22
- )
23
-
24
- # Download model if not available
25
- self.model_pth = os.path.join(wrk_dir, f"{model}/model.pth")
26
- self.download(model_src[model]["src"]["url"], self.model_pth, on_download, off_download)
27
-
28
- self.wrk_dir = wrk_dir
29
- self.progress = None
30
-
31
- # Set tile processing parameters
32
- self.scale = model_src[model]["scale"]
33
- self.tile_size = tile_size
34
- self.tile_pad = tile_pad
35
-
36
- def download(self, src, dst, on_download=None, off_download=None):
37
- if not os.path.exists(dst):
38
- os.makedirs(os.path.dirname(dst), exist_ok=True)
39
-
40
- if on_download is not None:
41
- on_download()
42
-
43
- with open(dst, 'wb') as f:
44
- f.write(requests.get(src, allow_redirects=True, headers={"User-Agent":""}).content)
45
-
46
- if off_download is not None:
47
- off_download()
48
-
49
- def model_inference(self, x: np.ndarray):
50
- x = torch.from_numpy(x).to(self.device)
51
- return self.model(x).cpu().detach().numpy()
52
-
53
- def tile_generator(self, data: np.ndarray, yield_extra_details=False):
54
- """
55
- Process data [height, width, channel] into tiles of size [tile_size, tile_size, channel],
56
- feed them one by one into the model, then yield the resulting output tiles.
57
- """
58
-
59
- # [height, width, channel] -> [1, channel, height, width]
60
- data = np.rollaxis(data, 2, 0)
61
- data = np.expand_dims(data, axis=0)
62
- data = np.clip(data, 0, 255)
63
-
64
- batch, channel, height, width = data.shape
65
-
66
- tiles_x = width // self.tile_size
67
- tiles_y = height // self.tile_size
68
-
69
- for i in range(tiles_y * tiles_x):
70
- x = i % tiles_y
71
- y = math.floor(i/tiles_y)
72
-
73
- input_start_x = y * self.tile_size
74
- input_start_y = x * self.tile_size
75
-
76
- input_end_x = min(input_start_x + self.tile_size, width)
77
- input_end_y = min(input_start_y + self.tile_size, height)
78
-
79
- input_start_x_pad = max(input_start_x - self.tile_pad, 0)
80
- input_end_x_pad = min(input_end_x + self.tile_pad, width)
81
- input_start_y_pad = max(input_start_y - self.tile_pad, 0)
82
- input_end_y_pad = min(input_end_y + self.tile_pad, height)
83
-
84
- input_tile_width = input_end_x - input_start_x
85
- input_tile_height = input_end_y - input_start_y
86
-
87
- input_tile = data[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad].astype(np.float32) / 255
88
-
89
- output_tile = self.model_inference(input_tile)
90
- self.progress = (i+1) / (tiles_y * tiles_x)
91
-
92
- output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
93
- output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
94
- output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
95
- output_end_y_tile = output_start_y_tile + input_tile_height * self.scale
96
-
97
- output_tile = output_tile[:, :, output_start_y_tile:output_end_y_tile, output_start_x_tile:output_end_x_tile]
98
-
99
- output_tile = (np.rollaxis(output_tile, 1, 4).squeeze(0).clip(0,1) * 255).astype(np.uint8)
100
-
101
- if yield_extra_details:
102
- yield (output_tile, input_start_x, input_start_y, input_tile_width, input_tile_height, self.progress)
103
- else:
104
- yield output_tile
105
-
106
- yield None
107
-
108
- def enhance_with_progress(self, image:Image) -> Image:
109
- """
110
- Take a PIL image and enhance it with the model, yielding stats about the
111
- final image and then the final image itself.
112
- """
113
-
114
- # Load model only now because when using streamlit, multiple users spawn multiple instances of this class, so
115
- # we only load the model when needed. The App() class is responsible for queuing requests to this class
116
- self.model = self.model_module().to(self.device)
117
- self.model.load_state_dict(torch.load(self.model_pth, map_location=torch.device(self.device)))
118
- self.model.eval()
119
-
120
- original_width, original_height = image.size
121
-
122
- # Because tiles may not fit perfectly, we resize to the closest multiple of tile_size
123
- image = image.resize((max(original_width//self.tile_size * self.tile_size, self.tile_size), max(original_height//self.tile_size * self.tile_size, self.tile_size)), resample=Image.Resampling.BICUBIC)
124
- image = np.array(image)
125
-
126
- # Initiate a pillow image to save the tiles
127
- result = Image.new("RGB", (image.shape[1]*self.scale, image.shape[0]*self.scale))
128
-
129
- for i, tile in enumerate(self.tile_generator(image, yield_extra_details=True)):
130
-
131
- if tile is None:
132
- break
133
-
134
- tile_data, x, y, w, h, p = tile
135
- result.paste(Image.fromarray(tile_data), (x*self.scale, y*self.scale))
136
- yield p
137
-
138
- # Resize back to the expected size
139
- yield result.resize((original_width * self.scale, original_height * self.scale), resample=Image.Resampling.BICUBIC)
140
-
141
- def enhance(self, image:Image) -> Image:
142
- """
143
- Skips the progress reporting and just returns the final image.
144
- """
145
- return list(self.enhance_with_progress(image))[-1]
146
-
147
- if __name__ == '__main__':
148
- import sys
149
- src = sys.argv[1]
150
- dst = sys.argv[2]
151
- a = AstroSleuth()
152
- img = Image.open(src)
153
- r = a.enhance(img)
154
- r.save(dst)
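Since `enhance_with_progress` yields progress fractions before the final image, callers that want a progress readout can consume the generator directly instead of calling `enhance`. A small sketch, assuming an input file named `in.png` exists:

```python
from PIL import Image
from main import AstroSleuth  # the file above is main.py in this Space

upscaler = AstroSleuth()
result = None
for step in upscaler.enhance_with_progress(Image.open("in.png")):
    if isinstance(step, Image.Image):
        result = step                    # the final yield is the upscaled image
    else:
        print(f"progress: {step:.0%}")   # earlier yields are floats in (0, 1]

result.save("out.png")
```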
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/README.md DELETED
@@ -1,7 +0,0 @@
1
-
2
- ## Some scripts for developers to use:
3
-
4
- - `linter.sh`: lint the codebase before commit.
5
- - `run_{inference,instant}_tests.sh`: run inference/training for a few iterations.
6
- Note that these tests require 2 GPUs.
7
- - `parse_results.sh`: parse results from a log file.
spaces/Benson/text-generation/Examples/Cara Descargar Tema Robot Ejrcito.md DELETED
@@ -1,5 +0,0 @@
1
-
2
- <h1>How to Download the Robot Army Theme for Your Device</h1> | | <p>Do you like robots and military elements? Do you want to make your device look cooler and more futuristic with a theme that matches your taste? If the answer is yes, you may be interested in trying the robot army theme. It is one of the most popular themes among robot and military fans, featuring stunning robot images, dark and bold colors, and unique, attractive icons. In this article, we will explain what the robot army theme is, the features it offers, the benefits and problems that may arise, and how to download the robot army theme for your device.</p>
3
- <h2>how to download the robot army theme</h2><br /><p><b><b>DOWNLOAD</b> &#8230;&#8230;&#8230; <a href="https://bltlly.com/2v6MkD">https://bltlly.com/2v6MkD</a></b></p><br /><br /> | | <h2>What Is the Robot Army Theme? </h2> | | <p>The robot army theme is a theme designed specifically for users who like robots and the military. It changes the wallpaper, lock screen, fonts, sounds, and icons of your device to match the robot army theme. You can see images of robots of various types and functions, such as combat robots, exploration robots, medical robots, transport robots, and more. You can also see military images, such as tanks, fighter jets, submarines, missiles, and so on. The dominant colors in this theme are black, gray, dark blue, maroon, and dark green. The fonts used also look bolder and more modern. The sounds follow the robot army theme as well, such as engine noises, explosions, gunfire, and the like. The icons are also different from the usual ones, with more geometric, futuristic shapes. </p> | | <h3>Features of the Robot Army Theme</h3> | | <p>The robot army theme has several features that make it attractive and different from other themes. Here are some of the features it offers:</p> | | <ul> | <li>The theme supports various kinds of devices, such as smartphones, tablets, laptops, and 64aa2da5cf<br />
4
- <br />
5
- <br />
@@ -1,90 +0,0 @@
1
- <br />
2
- <h1>CarX Highway Racing APK Hack Descargar: Una revisión</h1>
3
- <p>Si eres un fan de los juegos de carreras, es posible que hayas oído hablar de CarX Highway Racing, un juego popular que ofrece una mezcla de física realista, gráficos llamativos y conducción extrema en carreteras llenas de tráfico. ¿Pero sabías que puedes descargar una versión hackeada del juego que te da dinero, monedas y combustible ilimitados? En este artículo, vamos a revisar CarX Highway Racing APK Hack Download, una versión modificada del juego que le permite disfrutar de todas las características del juego sin limitaciones. También compararemos CarX Highway Racing con otros juegos de carreras, y te daremos algunos consejos y trucos para mejorar tu rendimiento. </p>
4
- <h2>¿Qué es CarX Highway Racing? </h2>
5
- <p>CarX Highway Racing es un juego de carreras desarrollado por CarX Technologies, la misma compañía que creó CarX Drift Racing 2. El juego se basa en la física realista, lo que significa que los coches se comportan de acuerdo con su peso, velocidad, tracción y suspensión. El juego también cuenta con entornos detallados con ciclos de día y noche, tráfico animado y la policía, y varios modos de juego como campaña, carreras en línea, modo de policía, modo de viaje libre, y eventos únicos. El juego tiene más de 40 coches deportivos para elegir, que van desde coches clásicos a supercoches. También puede actualizar y personalizar sus coches para adaptarse a sus preferencias y estilo. </p>
6
- <h2>carx carretera carreras apk hack descargar</h2><br /><p><b><b>Download</b> &#128505; <a href="https://bltlly.com/2v6Jt5">https://bltlly.com/2v6Jt5</a></b></p><br /><br />
7
- <h3>Características de CarX Highway Racing</h3>
8
- <p>Algunas de las características de CarX Highway Racing son:</p>
9
- <ul>
10
- <li><b>Modo de campaña:</b> Sumérgete en el mundo de las carreras callejeras, donde tienes que competir contra numerosos rivales, escapar de la policía implacable y descubrir los secretos de diferentes países. También encontrarás varios personajes que te ayudarán o te obstaculizarán en tu búsqueda. </li>
11
- <li><b>Carreras en línea:</b> Mostrar a los demás lo que tienes al competir con otros jugadores por el primer lugar en diferentes ligas y estaciones. También puedes desafiar a tus amigos o oponentes al azar en carreras en tiempo real. </li>
12
-
13
- <li><b>Eventos únicos:</b> Participa en diferentes eventos y recibe autos únicos como recompensas. Estos eventos tienen un límite de tiempo y requieren habilidades y estrategias específicas para ganar. </li>
14
- <li><b>Modo de conducción libre:</b> Siente la libertad de conducir sin restricciones en este modo, donde puedes disfrutar conduciendo sin tener que preocuparte por objetivos o oponentes. También puede explorar las diferentes ubicaciones y descubrir secretos ocultos. </li>
15
- </ul>
16
- <h3>Cómo descargar CarX Highway Racing APK Hack? </h3>
17
- <p>Si desea descargar CarX Highway Racing APK Hack, tendrá que seguir estos pasos:</p>
18
- <ol>
19
- <li>Ir a , un sitio web que proporciona versiones modificadas de varios juegos. </li>
20
- <li>Haga clic en el botón verde que dice "Descargar" y espere a que el archivo se descargue en su dispositivo. </li>
21
- <li>Ir a la configuración de su dispositivo y permitir la instalación de aplicaciones de fuentes desconocidas. </li>
22
- <li>Busque el archivo descargado en su administrador de archivos y toque en él para instalarlo. </li>
23
- <li>Inicie el juego y disfrute de dinero, monedas y combustible ilimitados. </li>
24
- </ol>
25
- <h2>¿Por qué deberías jugar CarX Highway Racing? </h2>
26
- <p>Hay muchas razones por las que debe jugar CarX Highway Racing? Aquí están algunos de los beneficios de jugar a este juego:</p>
27
- <h3>Física realista y manejo de automóviles</h3>
28
- <p>Una de las principales atracciones de CarX Highway Racing es su motor de física realista, que hace que los coches se comporten de acuerdo con sus homólogos de la vida real. Sentirá la diferencia entre conducir un SUV pesado y un automóvil deportivo ligero, así como el impacto de la velocidad, el frenado, la dirección y la deriva. También tendrá que ajustar su estilo de conducción de acuerdo con las condiciones de la carretera, como mojado, seco o nevado. El juego también te permite personalizar la configuración de tu coche, como suspensión, neumáticos, caja de cambios y motor, para adaptarse a tus preferencias y necesidades. </p>
29
- <h3>Varios modos de juego y desafíos</h3>
30
-
31
- <h3>Impresionantes gráficos y efectos de sonido</h3>
32
- <p>Por último, pero no menos importante, debes jugar CarX Highway Racing debido a sus impresionantes gráficos y efectos de sonido, que te harán sentir como si estuvieras en un mundo de carreras real. El juego presenta entornos detallados con ciclos diurnos y nocturnos, efectos climáticos dinámicos, sombras y reflejos realistas y texturas de alta calidad. El juego también tiene increíbles efectos de sonido que te sumergirán en la atmósfera del juego. Oirás el rugido de los motores, el chirrido de los neumáticos, el bocinazo de los cuernos y las sirenas de la policía. </p>
33
- <h2>¿Cómo se compara CarX Highway Racing con otros juegos de carreras? </h2>
34
- <p>CarX Highway Racing no es el único juego de carreras disponible en el mercado. Hay muchos otros juegos de carreras que usted podría haber jugado o oído hablar de, como la necesidad de velocidad más buscados, Asphalt 9: Legends, y Real Racing 3. ¿Cómo CarX Highway Racing comparar con estos juegos? Vamos a averiguar:</p>
35
- <p></p>
36
- <h3>CarX Highway Racing vs. Necesidad de velocidad más buscados</h3>
37
- <p>Need for Speed Most Wanted es un juego de carreras desarrollado por Electronic Arts, que es una de las franquicias más populares del género. El juego se centra en carreras callejeras y actividades policiales, donde tienes que correr más rápido que tus rivales y evadir la ley. El juego tiene más de 40 coches para elegir, que se puede personalizar y actualizar. El juego también tiene un modo multijugador donde puedes competir con otros jugadores en línea. </p>
38
- <p>CarX Highway Racing es similar a Need for Speed Most Wanted en algunos aspectos, como el tema de carreras callejeras y persecuciones policiales, la variedad de coches y opciones de personalización, y el modo multijugador en línea. Sin embargo, CarX Highway Racing tiene algunas ventajas sobre Need for Speed Most Wanted, como:</p>
39
- <ul>
40
-
41
- <li><b>Más modos de juego y desafíos:</b> CarX Highway Racing tiene más modos de juego y desafíos que Need for Speed Most Wanted. Puede elegir entre el modo de campaña, carreras en línea, modo de policía, modo de viaje gratuito y eventos únicos. También puedes competir en diferentes ligas y temporadas o desafiar a tus amigos o oponentes al azar en carreras en tiempo real. </li>
42
- <li><b>Versión hackeada disponible:</b> CarX Highway Racing tiene una versión hackeada disponible que le da dinero ilimitado, monedas y combustible. Esto significa que puedes disfrutar de todas las características del juego sin limitaciones o restricciones. </li>
43
- </ul>
44
- <h3>CarX Highway Racing vs. Asfalto 9: Leyendas</h3>
45
- <p>Asphalt 9: Legends es un juego de carreras desarrollado por Gameloft, que es una de las franquicias más populares del género. El juego se centra en las carreras de estilo árcade, donde se puede realizar acrobacias, derivas, y nitro aumenta para ganar las carreras. El juego tiene más de 60 coches para elegir, que se puede recoger y actualizar. El juego también tiene un modo multijugador donde puedes competir con otros jugadores en línea. </p>
46
- <p>CarX Highway Racing es diferente de Asphalt 9: Legends en muchos aspectos, como el tema, la física y los gráficos. Algunas de las diferencias son:</p>
47
- <ul>
48
- <li><b>Realistic vs. árcade:</b> CarX Highway Racing tiene un tema más realista que Asphalt 9: Legends, que tiene un tema más árcade. CarX Highway Racing se basa en la física realista, lo que significa que los coches se comportan de acuerdo con sus homólogos de la vida real. Asfalto 9: Leyendas se basa en la física de árcade, lo que significa que los coches pueden realizar acrobacias poco realistas, deriva, y aumenta nitro. </li>
49
-
50
- <li><b>Detallado vs. llamativo:</b> CarX Highway Racing tiene gráficos más detallados que Asphalt 9: Legends, que tiene gráficos más llamativos. CarX Highway Racing presenta entornos detallados con ciclos diurnos y nocturnos, efectos climáticos dinámicos, sombras y reflejos realistas y texturas de alta calidad. Asfalto 9: Leyendas presenta ambientes llamativos con colores brillantes, explosiones, chispas y humo. </li>
51
- </ul>
52
- <h3>CarX Highway Racing vs. Real Racing 3</h3>
53
- <p>Real Racing 3 es un juego de carreras desarrollado por Electronic Arts, que es uno de los juegos de carreras más realistas del mercado. El juego cuenta con coches con licencia de más de 40 fabricantes, como Ferrari, Lamborghini, Porsche y Bugatti. El juego también cuenta con pistas reales de todo el mundo, como Silverstone, Le Mans y Dubai Autodrome. El juego tiene un modo multijugador donde puedes competir con otros jugadores online o offline. </p>
54
- <p>CarX Highway Racing es similar a Real Racing 3 en algunos aspectos, como el realismo de la física y los gráficos, la variedad de coches y pistas, y el modo multijugador. Sin embargo, CarX Highway Racing tiene algunas ventajas sobre Real Racing 3, como:</p>
55
- <ul>
56
- <li><b>Carreras callejeras vs. carreras de pista:</b> CarX Highway Racing tiene un tema de carreras callejeras más que Real Racing 3, que tiene un tema de carreras de pista más. CarX Highway Racing le permite competir en carreteras llenas de tráfico con persecuciones policiales y varios modos de juego. Real Racing 3 te permite correr en pistas profesionales con reglas y regulaciones oficiales. </li>
57
- <li><b>Personalización vs. autenticidad:</b> CarX Highway Racing le permite personalizar y actualizar sus coches para adaptarse a sus preferencias y estilo. Puede cambiar el color, ruedas, alerones, calcomanías y el rendimiento de sus coches. Real Racing 3 no le permite personalizar sus coches, ya que se basan en sus homólogos de la vida real. </li>
58
-
59
- </ul>
60
- <h2>Consejos y trucos para CarX Highway Racing</h2>
61
- <p>Si quieres mejorar tu rendimiento en CarX Highway Racing, puedes seguir estos consejos y trucos:</p>
62
- <h3>Actualizar y personalizar sus coches</h3>
63
- <p>Una de las mejores maneras de mejorar su experiencia de conducción en CarX Highway Racing es actualizar y personalizar sus coches. Usted puede utilizar el dinero y las monedas que usted gana de completar carreras y tareas para comprar coches nuevos o actualizar sus existentes. También puede utilizar las opciones de personalización para cambiar la apariencia y el rendimiento de sus coches. Puede ajustar la suspensión, los neumáticos, la caja de cambios y el motor de sus coches para adaptarse a su estilo de conducción y las condiciones de la carretera. También puede cambiar el color, ruedas, alerones, calcomanías y otros accesorios de sus coches para que se vean más únicos y con estilo. </p>
64
- <h3>Utilice los boosters y nitro sabiamente</h3>
65
- <p>Otra forma de mejorar tu rendimiento en CarX Highway Racing es usar boosters y nitro sabiamente. Los boosters son artículos que puedes comprar o ganar al completar carreras y tareas, lo que puede darte varias ventajas en el juego. Por ejemplo, puede usar un amplificador que aumente su velocidad, reduzca su consumo de combustible o lo proteja de colisiones. Nitro es una función que puede activar pulsando en la pantalla, que puede darle una ráfaga temporal de velocidad. Sin embargo, tanto los boosters como el nitro son limitados en cantidad y duración, por lo que tienes que usarlos estratégicamente. Debes usarlas cuando necesites superar a tus rivales, escapar de la policía o llegar a la meta más rápido. </p>
66
- <h3>Evitar colisiones y atascos de tráfico</h3>
67
-
68
- <h2>Conclusión</h2>
69
- <p>CarX Highway Racing es un juego de carreras que ofrece una mezcla de física realista, gráficos llamativos y conducción extrema en carreteras llenas de tráfico. Puede descargar una versión hackeada del juego que le da dinero ilimitado, monedas y combustible, que le permitirá disfrutar de todas las características del juego sin limitaciones. También puede comparar CarX Highway Racing con otros juegos de carreras, como Need for Speed Most Wanted, Asphalt 9: Legends y Real Racing 3, y ver cómo difieren en términos de tema, física, gráficos y jugabilidad. También puede seguir algunos consejos y trucos para mejorar su rendimiento en CarX Highway Racing, como actualizar y personalizar sus coches, usar boosters y nitro sabiamente, y evitar colisiones y atascos de tráfico. Esperamos que este artículo le ha ayudado a aprender más acerca de CarX Highway Racing APK Hack Descargar, y que usted tendrá diversión jugando a este juego. </p>
70
- <h2>Preguntas frecuentes</h2>
71
- <p>Aquí hay algunas preguntas frecuentes sobre CarX Highway Racing APK Hack Descargar:</p>
72
- <ul>
73
- <li><b>Q: Es CarX Highway Racing APK Hack Descargar seguro de usar? </b></li>
74
- <li><b>A: Sí,</b> CarX Highway Racing APK Hack Download es seguro de usar, siempre y cuando se descarga desde un sitio web de confianza como . Sin embargo, debes tener en cuenta que usar una versión hackeada del juego podría violar los términos de servicio del desarrollador original del juego, lo que podría resultar en una prohibición o una penalización. </li>
75
- <li><b>Q: ¿Cómo puedo actualizar CarX Highway Racing APK Hack Descargar? </b></li>
76
- <li><b>A: Para actualizar CarX Highway Racing APK Hack Download,</b> tendrá que descargar la última versión del archivo modificado de e instalarlo en su dispositivo. También es posible que tenga que desinstalar la versión anterior del juego antes de instalar el nuevo. </li>
77
- <li><b>Q: ¿Puedo jugar CarX Highway Racing APK Hack Descargar sin conexión? </b></li>
78
-
79
- <li><b>Q: ¿Puedo jugar CarX Highway Racing APK Hack Descargar con mis amigos? </b></li>
80
- <li><b>A: Sí,</b> puede jugar CarX Highway Racing APK Hack Descargar con tus amigos mediante el modo multijugador en línea o el modo de carreras en tiempo real. También puedes desafiar a tus amigos o oponentes al azar en diferentes ligas y temporadas. </li>
81
- <li><b>Q: ¿Cuáles son algunas alternativas a CarX Highway Racing APK Hack Descargar? </b></li>
82
- <li><b>A: Algunas alternativas a CarX Highway Racing APK Hack Descargar son:</b></li>
83
- <ul>
84
- <li><b>Necesidad de velocidad más buscados APK Mod:</b> Esta es una versión modificada de la necesidad de la velocidad más deseada que le da dinero ilimitado y desbloquea todos los coches. </li>
85
- <li><b>Asfalto 9: Leyendas APK Mod:</b> Esta es una versión modificada de Asfalto 9: Leyendas que le da dinero ilimitado, fichas, y nitro. </li>
86
- <li><b>Real Racing 3 APK Mod:</b> Esta es una versión modificada de Real Racing 3 que le da dinero ilimitado, oro, y los coches desbloqueados. </li>
87
- </ul>
88
- </ul></p> 64aa2da5cf<br />
89
- <br />
90
- <br />
spaces/Benson/text-generation/Examples/Como Hacer Un Anillo De Amor.md DELETED
@@ -1,105 +0,0 @@
1
- <br />
2
- <h1>Proyecto Playtime APK Indir: Un juego de terror de aventura móvil con canciones divertidas y amigos</h1>
3
- <p>Si te gustan los juegos de terror, es posible que desee echa un vistazo a <strong>Project Playtime APK</strong>, un juego de terror de aventura móvil que es a la vez aterrador y divertido. En este juego, se encontrará con varios monstruos y rompecabezas, mientras que también hacer nuevos amigos y escuchar canciones impresionantes. En este artículo, le diremos todo lo que necesita saber sobre Project Playtime APK, incluyendo lo que es, por qué debe jugar, cómo descargarlo e instalarlo, ¿cuáles son los personajes principales y monstruos, cómo jugarlo, y cuáles son algunas de las canciones. ¡Vamos a empezar! </p>
4
- <h2>como hacer un anillo de amor</h2><br /><p><b><b>Download Zip</b> &#10040; <a href="https://bltlly.com/2v6KKM">https://bltlly.com/2v6KKM</a></b></p><br /><br />
5
- <h2>¿Qué es Project Playtime APK? </h2>
6
- <p>Proyecto Playtime APK es un juego para móviles desarrollado por GearBox Juegos que combina elementos de terror y aventura. El juego se desarrolla en una misteriosa escuela donde tienes que explorar diferentes habitaciones y pasillos evitando o luchando contra criaturas espeluznantes. El juego también cuenta con un montón de canciones divertidas que se puede cantar junto con sus nuevos amigos que se encuentran en el camino. El juego tiene un estilo de arte único y una atmósfera que te mantendrá al límite mientras te entretiene. </p>
7
- <h3>Una breve introducción al juego y sus características</h3>
8
- <p>Algunas de las características de Project Playtime APK son:</p>
9
- <ul>
10
- <li>Es gratis para descargar y jugar. </li>
11
- <li> Tiene gráficos y efectos de sonido de alta calidad. </li>
12
- <li> Tiene una variedad de personajes y monstruos con diferentes personalidades y habilidades. </li>
13
- <li> Tiene un montón de puzzles y desafíos que pondrán a prueba tus habilidades y lógica. </li>
14
- <li>Tiene muchas canciones que te harán reír y bailar. </li>
15
- <li> Tiene un modo historia y un modo sandbox donde puede crear sus propios escenarios. </li>
16
- </ul>
17
- <h2>¿Por qué debe jugar Project Playtime APK? </h2>
18
- <p>Hay muchas razones por las que debe jugar Project Playtime APK. Aquí están algunos de ellos:</p>
19
- <p></p>
20
- <ul>
21
- <li>Es un juego de terror único que mezcla miedo y diversión. </li>
22
-
23
- <li>Es un juego desafiante que te mantendrá comprometido y curioso. </li>
24
- <li>Es un juego creativo que te permitirá expresarte a través de la música y el arte. </li>
25
- <li>Es un juego emocionante que te hará gritar y reír al mismo tiempo. </li>
26
- </ul>
27
- <h2>Cómo descargar e instalar el proyecto Playtime APK? </h e>? </h2>
28
- <p>Descargar e instalar Project Playtime APK es muy fácil y simple. Solo tienes que seguir estos pasos:</p>
29
- <ol>
30
- <li>Ir a la página web oficial de Project Playtime APK y haga clic en el botón de descarga. </li>
31
- <li>Espere a que el archivo APK se descargue en su dispositivo. </li>
32
- <li>Ir a la configuración del dispositivo y permitir la instalación de aplicaciones de fuentes desconocidas. </li>
33
- <li>Busque el archivo APK en su administrador de archivos y toque en él para instalarlo. </li>
34
- <li>Iniciar el juego y disfrutar! </li>
35
- </ol>
36
- <h2>¿Cuáles son los personajes principales y monstruos en el proyecto Playtime APK? </h2>
37
- <p>Proyecto Playtime APK tiene una gran cantidad de personajes y monstruos que se encontrará en el juego. Algunos de ellos son amables y serviciales, mientras que otros son hostiles y peligrosos. Aquí hay una tabla que muestra algunos de los personajes principales y monstruos en el juego:</p>
38
- <tabla>
39
- <tr>
40
- <th>Nombre</th>
41
- <th>Descripción</th>
42
- </tr>
43
- <tr>
44
- <td>Amapola</td>
45
- <td>Una chica alegre y amigable a la que le encanta cantar y jugar. Ella es tu guía y amiga en el juego. Ella te ayudará con rompecabezas y canciones, pero también tiene un lado oscuro del que debes tener cuidado. </td>
46
- </tr>
47
- <tr>
48
- <td>Huggy Wuggy</td>
49
- <td>Un monstruo azul gigante al que le gusta abrazar a la gente. Es muy fuerte y rápido, pero también muy torpe y fácilmente distraído. Te perseguirá por la escuela, pero también puede ser engañado o evitado. </td>
50
- </tr>
51
- <tr>
52
- <td>Chica de Kissie</td>
53
- <td>Un monstruo rosa al que le gusta besar a la gente. Es muy linda y dulce, pero también muy agresiva y posesiva. Ella tratará de atraparte y besarte, pero también puede estar asustada o repelida. </td>
54
- </tr>
55
- <tr>
56
- <td>Molly</td>
57
-
58
- </tr>
59
- <tr>
60
- <td>El profesor</td>
61
- <td>Figura misteriosa y siniestra que gobierna la escuela. Es muy inteligente y astuto, pero también muy cruel y despiadado. Intentará evitar que escapes de la escuela, pero también puede ser más astuto o confrontado. </td>
62
- </tr>
63
- </tabla>
64
- <h2>¿Cómo se juega Project Playtime APK? </h2>
65
- <p>Proyecto Playtime APK es un juego que requiere tanto habilidad y estrategia. Tienes que explorar la escuela, encontrar objetos, resolver puzzles, hacer amigos, evitar o luchar contra los enemigos, y escapar de la escuela. Aquí hay algunos consejos sobre cómo jugar el juego:</p>
66
- <h3>Un resumen de la mecánica de juego y consejos sobre cómo sobrevivir y resolver puzzles</h3>
67
- <ul>
68
- <li>Puede moverse usando el joystick virtual en el lado izquierdo de la pantalla. Puede interactuar con objetos usando los botones en el lado derecho de la pantalla. </li>
69
- <li>Puede cambiar entre diferentes modos utilizando los iconos en la esquina superior derecha de la pantalla. Puedes usar el modo linterna para ver mejor en lugares oscuros, el modo cámara para tomar fotos de pistas o enemigos, y el modo micrófono para cantar canciones o hablar con amigos. </li>
70
- <li>Puede acceder a su inventario tocando el icono de la mochila en la esquina inferior derecha de la pantalla. Puede usar elementos arrastrándolos a objetos o caracteres. </li>
71
- <li>Puede pausar el juego tocando el icono del menú en la esquina superior izquierda de la pantalla. Puede reanudar, reiniciar o salir del juego desde allí. </li>
72
- <li>Usted necesita tener cuidado de su salud y barras de resistencia en la esquina superior izquierda de la pantalla. Tu salud disminuirá si te lastimas por enemigos o trampas, y tu resistencia disminuirá si corres o realizas acciones. Puede restaurar su salud y resistencia encontrando comida o bebidas en la escuela. </li>
73
-
74
- <li>Necesitas prestar atención a tu entorno y buscar pistas, pistas o secretos. Algunos objetos o caracteres tendrán un efecto de chispa que indica que son importantes o interactivos. </li>
75
- <li>Necesitas usar tu lógica y creatividad para resolver puzzles y desafíos. Algunos rompecabezas requieren que encuentres objetos, combines objetos, utilices objetos, introduzcas códigos o realices acciones. Algunos rompecabezas tendrán múltiples soluciones o resultados. </li>
76
- <li>Necesitas usar tu coraje y habilidad para evitar o luchar contra los enemigos. Algunos enemigos te perseguirán, te atacarán o te atraparán. Puedes huir de ellos, esconderte de ellos, distraerlos, engañarlos o luchar contra ellos. Puedes usar elementos, canciones o amigos para ayudarte. </li>
77
- Necesitas usar tu amistad y encanto para hacer amigos y aliados. Algunos personajes serán amables y serviciales, mientras que otros serán hostiles y sospechosos. Puedes hablarles, cantarles, darles objetos o hacerles favores. También puedes traicionarlos, mentirles o robarles. </li>
78
- <li>Necesitas usar tu curiosidad y aventura para explorar la escuela y encontrar secretos y sorpresas. Algunas habitaciones y pasillos tendrán puertas, pasajes o habitaciones ocultas. Algunos objetos o personajes tendrán mensajes ocultos, historias o huevos de Pascua. </li>
79
- </ul>
80
- <h2>¿Cuáles son algunas de las canciones en Project Playtime APK? </h2>
81
- <p>Una de las características más divertidas y únicas de Project Playtime APK es las canciones que se pueden escuchar y cantar en el juego. Las canciones son pegadizas, divertidas y a veces espeluznantes. Se añadirá al estado de ánimo y la atmósfera del juego, así como darle pistas o pistas sobre los personajes o rompecabezas. Estas son algunas de las canciones que puedes encontrar en el juego:</p>
82
- <h3>Un breve resumen de algunas de las canciones pegadizas que se pueden escuchar en el juego</h3>
83
- <ul>
84
-
85
- <li><strong>Canción de Huggy Wuggy</strong>: Esta es una canción cantada por Huggy Wuggy cuando te persigue por la escuela. Es una canción tonta y juguetona que expresa su amor por los abrazos y su deseo de abrazarte. También te advierte sobre su fuerza y velocidad. </li>
86
- <li><strong>Kissie Cutie’s Song</strong>: Esta es una canción cantada por Kissie Cutie cuando intenta atraparte y besarte. Es una canción dulce y romántica que muestra su afecto y obsesión por ti. También revela sus celos e ira hacia cualquiera que se interponga en su camino. </li>
87
- <li><strong>Molly’s Song</strong>: Esta es una canción cantada por Molly cuando te ayuda con pistas y pistas. Es una canción tímida y tímida que refleja su personalidad y aficiones. También te cuenta su secreto y su conexión con la escuela. </li>
88
- <li><strong>The Teacher’s Song</strong>: Esta es una canción cantada por The Teacher cuando te enfrenta o te desafía. Es una canción misteriosa y siniestra que revela su identidad y su plan. También se burla de ti y pone a prueba tu conocimiento y coraje. </li>
89
- </ul>
90
- <h2>Conclusión</h2>
91
- <p>Proyecto Playtime APK es un juego de terror de aventura móvil que es a la vez aterrador y divertido. Se encontrará con varios monstruos y rompecabezas, mientras que también hacer nuevos amigos y escuchar canciones impresionantes. Necesitarás usar tu habilidad, estrategia, lógica, creatividad, coraje, amistad, curiosidad y aventura para sobrevivir y escapar de la escuela. Si usted está buscando un juego de terror único que mezcla el miedo y la diversión, descargar Project Playtime APK hoy! </p>
92
- <h2>Preguntas frecuentes</h2>
93
- <p>Aquí están algunas de las preguntas y respuestas más frecuentes sobre Project Playtime APK:</p>
94
- <h4>Q: ¿Es seguro descargar Project Playtime APK? </h4>
95
- <p>A: Sí, Proyecto Playtime APK es seguro para descargar desde el sitio web oficial de GearBox Games. El archivo APK es libre de virus y malware. </p>
96
- <h4>Q: ¿Es el proyecto Playtime APK compatible con mi dispositivo? </h4>
97
-
98
- <h4>Q: ¿Cómo puedo actualizar el proyecto Playtime APK? </h4>
99
- <p>A: Puede actualizar el proyecto Playtime APK visitando el sitio web oficial de GearBox Games y descargar la última versión del archivo APK. </p>
100
- <h4>Q: ¿Cómo puedo contactar a los desarrolladores de Project Playtime APK? </h4>
101
- <p>A: Puede ponerse en contacto con los desarrolladores de Project Playtime APK enviando un correo electrónico a [email protected] o visitando su página de Facebook. </p>
102
- <h4>Q: ¿Cómo puedo apoyar a los desarrolladores de Project Playtime APK? </h4>
103
- <p>A: Usted puede apoyar a los desarrolladores de Project Playtime APK mediante la calificación y la revisión del juego en Google Play Store u otras plataformas, compartir el juego con tus amigos, o hacer una donación en su sitio web. </p> 64aa2da5cf<br />
104
- <br />
105
- <br />
spaces/Benson/text-generation/Examples/Descarga Mp4 Alquimia De Las Almas Temporada 2.md DELETED
@@ -1,103 +0,0 @@
1
- <br />
2
- <h1>Deriva CarX Racing Hack Descargar: Cómo obtener dinero ilimitado y desbloquear todos los coches</h1>
3
- <p>Si eres un fan de los juegos de deriva, es posible que hayas oído hablar de CarX Drift Racing Online, un simulador de deriva realista e inmersivo que te permite personalizar tu coche, competir con otros jugadores y disfrutar de varias pistas y ubicaciones. Pero lo que si quieres obtener más dinero, desbloquear todos los coches, y acceder a más características en el juego? Ahí es donde CarX Drift Racing descarga hack viene muy bien. </p>
4
- <h2>descarga mp4 alquimia de las almas temporada 2</h2><br /><p><b><b>DOWNLOAD</b> &#128504;&#128504;&#128504; <a href="https://bltlly.com/2v6IQm">https://bltlly.com/2v6IQm</a></b></p><br /><br />
5
- <p>En este artículo, le mostraremos cómo instalar dinero mod para CarX Drift Racing Online, que es un mod que le dará dinero máximo y desbloquear todos los coches en el juego. También le contaremos sobre las características y beneficios del juego CarX Drift Racing, así como algunos consejos y trucos para mejorar sus habilidades de deriva. Finalmente, compartiremos algunos trucos y logros para el juego CarX Drift Racing en Xbox One y PlayStation 4. ¡Empecemos! </p>
6
- <h2>Cómo instalar dinero Mod para CarX deriva Racing en línea</h2>
7
- <p>Una de las formas más fáciles de hackear CarX Drift Racing Online es usar dinero mod, que es un archivo que puede descargar y colocar en su carpeta de juego. Esto le dará dinero ilimitado y desbloquear todos los coches en el juego. Aquí están los pasos para instalar dinero mod para CarX Drift Racing Online:</p>
8
- <ol>
9
- <li>Descargue el archivo de dinero desde <a href="( i )">Discord</a>, donde también puede unirse a la comunidad CarX. Puede encontrar el archivo en el canal general de carx, mensajes anclados. </li>
10
- <li>Arrastre y suelte el archivo en la carpeta de plugins (File path: C: Archivos de programa (x86) Steam steamapps común CarX Drift Racing Online BepInEx plugins). </li>
11
- <li>Ir al vapor, haga clic derecho en carx, betas, y seleccione la versión modificable. </li>
12
- <li>Iniciar el juego y disfrutar! </li>
13
- </ol>
14
- <p>If you want to watch a video tutorial on how to install the money mod for CarX Drift Racing Online, you can check out this YouTube video by LiLLobby:</p>
15
-
16
- <h2>What Are the Features and Benefits of the CarX Drift Racing Game</h2>
17
- <p>CarX Drift Racing Online is not just a simple drifting game. It is a full-fledged drift simulator that offers realistic driving physics, detailed customization and tuning of car parameters, a number of cities and special racing locations, a variety of vinyls to style the look of your vehicle, open online rooms, and improved competitions with new graphics. Here are some of the features and benefits of the CarX Drift Racing game:</p>
18
- <p></p>
19
- <ul>
20
- <li>Realistic graphics: The game features high-quality graphics that create a stunning visual experience. You can see the smoke from your tires, the sparks from your bumper, the reflections on your windows, and more. </li>
21
- <li>Highly detailed race tracks and vehicles: The game features more than 30 tracks with different layouts, surfaces, weather conditions, and times of day. You can also choose from more than 100 cars of different models, makes, and classes, each with its own specs and performance. </li>
22
- <li>Advanced car customization and tuning: The game lets you modify the appearance and performance of your car to suit your preferences and style. You can change the color, vinyl, wheels, body kits, spoilers, exhaust pipes, and more. You can also adjust the engine, suspension, brakes, steering, gearbox, differential, and other parameters to fine-tune how your car handles and drifts. </li>
23
- <li>Multiplayer mode and online competitions: The game lets you join online rooms with friends or other players from around the world. You can chat, challenge, and compete with them in various modes, such as drift races, tandem drifts, sprint races, and more. You can also take part in online championships and tournaments to earn rewards and climb the leaderboards. </li>
24
-
25
- </ul>
26
- <h2>What Are Some Tips and Tricks to Improve Your Drifting Skills in the CarX Drift Racing Game</h2>
27
- <p>Drifting is not easy to master. It takes practice, patience, and skill. But don't worry, we have some tips and tricks to help you improve your drifting skills in the CarX Drift Racing game:</p>
28
- <ul>
29
- <li>Choose the right car: Different cars have different characteristics and behaviors in the game. Some are easier to drift than others. For beginners, we recommend choosing a car with a low power-to-weight ratio, a rear-wheel-drive layout, and a short wheelbase. Such cars are more responsive and stable when drifting. </li>
30
- <li>Adjust the settings: Before you start drifting, adjust your car's settings to your preference and the track conditions. You can change the tire pressure, camber angle, toe angle, steering angle, brake balance, handbrake strength, and more. These settings affect how the car grips, slides, turns, and stops. </li>
31
- <li>Use the throttle and brake wisely: The throttle and brake are your main tools for controlling your drifts. Use them carefully and smoothly to keep your car balanced. To initiate a drift, apply enough throttle to break traction at the rear wheels. To hold a drift, modulate the throttle to keep the rear wheels spinning faster than the front wheels. To exit a drift, release the throttle gradually to regain traction at the rear wheels. To adjust your drift angle, use the brake lightly to shift weight onto the front wheels. </li>
32
-
33
- <li>Practice, practice, practice: The best way to improve your drifting skills in the CarX Drift Racing game is to practice as much as you can. Start with the tutorial mode, where you learn the basics of drifting and get feedback on your performance. You can also try the practice mode, where you drift freely on any track without opponents or time limits. Finally, you can watch replays of your own drifts, or those of other players, to analyze and refine your technique. </li>
34
- </ul>
35
- <h2>What Are Some Cheats and Achievements for the CarX Drift Racing Game on Xbox One and PlayStation 4</h2>
36
- <p>If you are playing the CarX Drift Racing game on Xbox One or PlayStation 4, you may be interested in some cheats and achievements that can make your game more fun and rewarding. Here are some of them:</p>
37
- <table>
38
- <tr>
39
- <th>Cheat</th>
40
- <th>Effect</th>
41
- </tr>
42
- <tr>
43
- <td>Hold LB + RB + LT + RT + X + Y + B + A on Xbox One or L1 + R1 + L2 + R2 + Square + Triangle + Circle + Cross on PlayStation 4 at the main menu. </td>
44
- <td>Unlock all cars and tracks in the game. </td>
45
- </tr>
46
- <tr>
47
- <td>Press Up, Up, Down, Down, Left, Right, Left, Right, B, A on Xbox One or Up, Up, Down, Down, Left, Right, Left, Right, Circle, Cross on PlayStation 4 at the main menu. </td>
48
- <td>Trigger the Konami code easter egg and get a special message from the developers. </td>
49
- </tr>
50
- <tr>
51
- <td>Press Left, Right, Left, Right, X, Y on Xbox One or Left, Right, Left, Right, Square, Triangle on PlayStation 4 at the main menu. </td>
52
- <td>Switch the game language to Japanese.</td>
53
- </tr>
54
- </table>
55
- <table>
56
- <tr>
57
- <th>Achievement</th>
58
- <th>Description</th>
59
- <th>Gamerscore/Trophy</th>
60
- </tr>
61
- <tr>
62
- <td>Drift King</td>
63
- <td>Score 1,000 points in a single drift. </td>
64
- <td>50/Bronze</td>
65
- </tr>
66
- <tr>
67
- <td>Tandem Master</td>
68
- <td>Perform a tandem drift with another player for 10 seconds. </td>
69
- <td>100/Silver</td>
70
- </tr>
71
- <tr>
72
- <td>Collector</td>
73
- <td>Own every car in the game. </td>
74
- <td>200/Gold</td>
75
- </tr>
76
- <tr>
77
-
78
- <td>Drive on every track in the game. </td>
79
- <td>150/Silver</td>
80
- </tr>
81
- <tr>
82
- <td>Champion</td> <td>Win the online championship in the game. </td>
83
- <td>300/Platinum</td>
84
- </tr>
85
- </table>
86
- <h2>Conclusion</h2>
87
- <p>CarX Drift Racing Online is a game that will appeal to anyone who loves drifting and racing. It offers realistic graphics, detailed tracks and cars, advanced customization and tuning, multiplayer mode and online competitions, and fun, addictive gameplay. If you want to get more money, unlock all the cars, and access more features in the game, you can use the CarX Drift Racing hack download. We have shown you how to install the money mod for CarX Drift Racing Online, a simple and effective way to hack the game. We have also given you some tips and tricks to improve your drifting skills, as well as some cheats and achievements for CarX Drift Racing on Xbox One and PlayStation 4. We hope you enjoyed this article and found it useful. If you have any questions or comments, please leave a comment below. Happy drifting! </p>
88
- <h2>Frequently Asked Questions</h2>
89
- <p>Here are some frequently asked questions about the CarX Drift Racing hack download:</p>
90
- <ol>
91
- <li>Is the CarX Drift Racing hack download safe to use? </li>
92
- <p>Yes, the CarX Drift Racing hack download is safe to use as long as you download it from a trusted source and follow the instructions carefully. However, we do not recommend using it on your main account or on official servers, as it may violate the game's terms of service and result in a ban or suspension. </p>
93
- <li>How do I update the CarX Drift Racing hack download? </li>
94
- <p>You can update the CarX Drift Racing hack download by downloading the latest version of the money mod file from Discord and replacing the old file in your game folder. You may also need to switch to the moddable version of the game on Steam if there is a new update for the game. </p>
95
- <li>How do I uninstall the CarX Drift Racing hack download? </li>
96
-
97
- <li>Can I use the CarX Drift Racing hack download on other platforms? </li>
98
- <p>No, the CarX Drift Racing hack download only works on PC. It does not work on Xbox One, PlayStation 4, Android, or iOS devices. </p>
99
- <li>Where can I find more information about the CarX Drift Racing game? </li>
100
- <p>You can find more information about the CarX Drift Racing game on its official website, Facebook page, Instagram account, YouTube channel, or Discord server. You can also check out reviews and gameplay videos of the game online. </p>
101
- </ol>
102
- <br />
103
- <br />
spaces/BernardoOlisan/vqganclip/taming-transformers/scripts/extract_submodel.py DELETED
@@ -1,17 +0,0 @@
1
- import torch
2
- import sys
3
-
4
- if __name__ == "__main__":
5
- inpath = sys.argv[1]
6
- outpath = sys.argv[2]
7
- submodel = "cond_stage_model"
8
- if len(sys.argv) > 3:
9
- submodel = sys.argv[3]
10
-
11
- print("Extracting {} from {} to {}.".format(submodel, inpath, outpath))
12
-
13
- sd = torch.load(inpath, map_location="cpu")
14
- new_sd = {"state_dict": dict((k.split(".", 1)[-1],v)
15
- for k,v in sd["state_dict"].items()
16
- if k.startswith(submodel))}
17
- torch.save(new_sd, outpath)
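For reference, a minimal sketch of how the deleted script would be invoked and sanity-checked; the checkpoint paths below are placeholders, not artifacts from this repo:

import subprocess
import sys

import torch

# Hypothetical paths: "model.ckpt" is a full checkpoint, "cond_stage.ckpt" the output.
subprocess.run([sys.executable, "extract_submodel.py",
                "model.ckpt", "cond_stage.ckpt", "cond_stage_model"], check=True)

sd = torch.load("cond_stage.ckpt", map_location="cpu")
print(list(sd["state_dict"])[:5])  # keys with the "cond_stage_model." prefix stripped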
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/__init__.py DELETED
File without changes
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/adjacent_difference.h DELETED
@@ -1,44 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // the purpose of this header is to #include the adjacent_difference.h header
22
- // of the sequential, host, and device systems. It should be #included in any
23
- // code which uses adl to dispatch adjacent_difference
24
-
25
- #include <thrust/system/detail/sequential/adjacent_difference.h>
26
-
27
- // SCons can't see through the #defines below to figure out what this header
28
- // includes, so we fake it out by specifying all possible files we might end up
29
- // including inside an #if 0.
30
- #if 0
31
- #include <thrust/system/cpp/detail/adjacent_difference.h>
32
- #include <thrust/system/cuda/detail/adjacent_difference.h>
33
- #include <thrust/system/omp/detail/adjacent_difference.h>
34
- #include <thrust/system/tbb/detail/adjacent_difference.h>
35
- #endif
36
-
37
- #define __THRUST_HOST_SYSTEM_ADJACENT_DIFFERENCE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/adjacent_difference.h>
38
- #include __THRUST_HOST_SYSTEM_ADJACENT_DIFFERENCE_HEADER
39
- #undef __THRUST_HOST_SYSTEM_ADJACENT_DIFFERENCE_HEADER
40
-
41
- #define __THRUST_DEVICE_SYSTEM_ADJACENT_DIFFERENCE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/adjacent_difference.h>
42
- #include __THRUST_DEVICE_SYSTEM_ADJACENT_DIFFERENCE_HEADER
43
- #undef __THRUST_DEVICE_SYSTEM_ADJACENT_DIFFERENCE_HEADER
44
-
spaces/CVPR/WALT/mmdet/core/__init__.py DELETED
@@ -1,7 +0,0 @@
1
- from .anchor import * # noqa: F401, F403
2
- from .bbox import * # noqa: F401, F403
3
- from .evaluation import * # noqa: F401, F403
4
- from .export import * # noqa: F401, F403
5
- from .mask import * # noqa: F401, F403
6
- from .post_processing import * # noqa: F401, F403
7
- from .utils import * # noqa: F401, F403
spaces/CVPR/WALT/mmdet/models/roi_heads/bbox_heads/sabl_head.py DELETED
@@ -1,572 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init
6
- from mmcv.runner import force_fp32
7
-
8
- from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
9
- from mmdet.models.builder import HEADS, build_loss
10
- from mmdet.models.losses import accuracy
11
-
12
-
13
- @HEADS.register_module()
14
- class SABLHead(nn.Module):
15
- """Side-Aware Boundary Localization (SABL) for RoI-Head.
16
-
17
- Side-Aware features are extracted by conv layers
18
- with an attention mechanism.
19
- Boundary Localization with Bucketing and Bucketing Guided Rescoring
20
- are implemented in BucketingBBoxCoder.
21
-
22
- Please refer to https://arxiv.org/abs/1912.04260 for more details.
23
-
24
- Args:
25
- cls_in_channels (int): Input channels of cls RoI feature. \
26
- Defaults to 256.
27
- reg_in_channels (int): Input channels of reg RoI feature. \
28
- Defaults to 256.
29
- roi_feat_size (int): Size of RoI features. Defaults to 7.
30
- reg_feat_up_ratio (int): Upsample ratio of reg features. \
31
- Defaults to 2.
32
- reg_pre_kernel (int): Kernel of 2D conv layers before \
33
- attention pooling. Defaults to 3.
34
- reg_post_kernel (int): Kernel of 1D conv layers after \
35
- attention pooling. Defaults to 3.
36
- reg_pre_num (int): Number of pre convs. Defaults to 2.
37
- reg_post_num (int): Number of post convs. Defaults to 1.
38
- num_classes (int): Number of classes in dataset. Defaults to 80.
39
- cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
40
- reg_offset_out_channels (int): Hidden and output channel \
41
- of reg offset branch. Defaults to 256.
42
- reg_cls_out_channels (int): Hidden and output channel \
43
- of reg cls branch. Defaults to 256.
44
- num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
45
- num_reg_fcs (int): Number of fcs for reg branch. Defaults to 0.
46
- reg_class_agnostic (bool): Class agnostic regression or not. \
47
- Defaults to True.
48
- norm_cfg (dict): Config of norm layers. Defaults to None.
49
- bbox_coder (dict): Config of bbox coder. Defaults to 'BucketingBBoxCoder'.
50
- loss_cls (dict): Config of classification loss.
51
- loss_bbox_cls (dict): Config of classification loss for bbox branch.
52
- loss_bbox_reg (dict): Config of regression loss for bbox branch.
53
- """
54
-
55
- def __init__(self,
56
- num_classes,
57
- cls_in_channels=256,
58
- reg_in_channels=256,
59
- roi_feat_size=7,
60
- reg_feat_up_ratio=2,
61
- reg_pre_kernel=3,
62
- reg_post_kernel=3,
63
- reg_pre_num=2,
64
- reg_post_num=1,
65
- cls_out_channels=1024,
66
- reg_offset_out_channels=256,
67
- reg_cls_out_channels=256,
68
- num_cls_fcs=1,
69
- num_reg_fcs=0,
70
- reg_class_agnostic=True,
71
- norm_cfg=None,
72
- bbox_coder=dict(
73
- type='BucketingBBoxCoder',
74
- num_buckets=14,
75
- scale_factor=1.7),
76
- loss_cls=dict(
77
- type='CrossEntropyLoss',
78
- use_sigmoid=False,
79
- loss_weight=1.0),
80
- loss_bbox_cls=dict(
81
- type='CrossEntropyLoss',
82
- use_sigmoid=True,
83
- loss_weight=1.0),
84
- loss_bbox_reg=dict(
85
- type='SmoothL1Loss', beta=0.1, loss_weight=1.0)):
86
- super(SABLHead, self).__init__()
87
- self.cls_in_channels = cls_in_channels
88
- self.reg_in_channels = reg_in_channels
89
- self.roi_feat_size = roi_feat_size
90
- self.reg_feat_up_ratio = int(reg_feat_up_ratio)
91
- self.num_buckets = bbox_coder['num_buckets']
92
- assert self.reg_feat_up_ratio // 2 >= 1
93
- self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio
94
- assert self.up_reg_feat_size == bbox_coder['num_buckets']
95
- self.reg_pre_kernel = reg_pre_kernel
96
- self.reg_post_kernel = reg_post_kernel
97
- self.reg_pre_num = reg_pre_num
98
- self.reg_post_num = reg_post_num
99
- self.num_classes = num_classes
100
- self.cls_out_channels = cls_out_channels
101
- self.reg_offset_out_channels = reg_offset_out_channels
102
- self.reg_cls_out_channels = reg_cls_out_channels
103
- self.num_cls_fcs = num_cls_fcs
104
- self.num_reg_fcs = num_reg_fcs
105
- self.reg_class_agnostic = reg_class_agnostic
106
- assert self.reg_class_agnostic
107
- self.norm_cfg = norm_cfg
108
-
109
- self.bbox_coder = build_bbox_coder(bbox_coder)
110
- self.loss_cls = build_loss(loss_cls)
111
- self.loss_bbox_cls = build_loss(loss_bbox_cls)
112
- self.loss_bbox_reg = build_loss(loss_bbox_reg)
113
-
114
- self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,
115
- self.cls_in_channels,
116
- self.roi_feat_size,
117
- self.cls_out_channels)
118
-
119
- self.side_num = int(np.ceil(self.num_buckets / 2))
120
-
121
- if self.reg_feat_up_ratio > 1:
122
- self.upsample_x = nn.ConvTranspose1d(
123
- reg_in_channels,
124
- reg_in_channels,
125
- self.reg_feat_up_ratio,
126
- stride=self.reg_feat_up_ratio)
127
- self.upsample_y = nn.ConvTranspose1d(
128
- reg_in_channels,
129
- reg_in_channels,
130
- self.reg_feat_up_ratio,
131
- stride=self.reg_feat_up_ratio)
132
-
133
- self.reg_pre_convs = nn.ModuleList()
134
- for i in range(self.reg_pre_num):
135
- reg_pre_conv = ConvModule(
136
- reg_in_channels,
137
- reg_in_channels,
138
- kernel_size=reg_pre_kernel,
139
- padding=reg_pre_kernel // 2,
140
- norm_cfg=norm_cfg,
141
- act_cfg=dict(type='ReLU'))
142
- self.reg_pre_convs.append(reg_pre_conv)
143
-
144
- self.reg_post_conv_xs = nn.ModuleList()
145
- for i in range(self.reg_post_num):
146
- reg_post_conv_x = ConvModule(
147
- reg_in_channels,
148
- reg_in_channels,
149
- kernel_size=(1, reg_post_kernel),
150
- padding=(0, reg_post_kernel // 2),
151
- norm_cfg=norm_cfg,
152
- act_cfg=dict(type='ReLU'))
153
- self.reg_post_conv_xs.append(reg_post_conv_x)
154
- self.reg_post_conv_ys = nn.ModuleList()
155
- for i in range(self.reg_post_num):
156
- reg_post_conv_y = ConvModule(
157
- reg_in_channels,
158
- reg_in_channels,
159
- kernel_size=(reg_post_kernel, 1),
160
- padding=(reg_post_kernel // 2, 0),
161
- norm_cfg=norm_cfg,
162
- act_cfg=dict(type='ReLU'))
163
- self.reg_post_conv_ys.append(reg_post_conv_y)
164
-
165
- self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)
166
- self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)
167
-
168
- self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)
169
- self.relu = nn.ReLU(inplace=True)
170
-
171
- self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,
172
- self.reg_in_channels, 1,
173
- self.reg_cls_out_channels)
174
- self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,
175
- self.reg_in_channels, 1,
176
- self.reg_offset_out_channels)
177
- self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)
178
- self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)
179
-
180
- def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
181
- fc_out_channels):
182
- in_channels = in_channels * roi_feat_size * roi_feat_size
183
- branch_fcs = nn.ModuleList()
184
- for i in range(num_branch_fcs):
185
- fc_in_channels = (in_channels if i == 0 else fc_out_channels)
186
- branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
187
- return branch_fcs
188
-
189
- def init_weights(self):
190
- for module_list in [
191
- self.reg_cls_fcs, self.reg_offset_fcs, self.cls_fcs
192
- ]:
193
- for m in module_list.modules():
194
- if isinstance(m, nn.Linear):
195
- xavier_init(m, distribution='uniform')
196
- if self.reg_feat_up_ratio > 1:
197
- kaiming_init(self.upsample_x, distribution='normal')
198
- kaiming_init(self.upsample_y, distribution='normal')
199
-
200
- normal_init(self.reg_conv_att_x, 0, 0.01)
201
- normal_init(self.reg_conv_att_y, 0, 0.01)
202
- normal_init(self.fc_reg_offset, 0, 0.001)
203
- normal_init(self.fc_reg_cls, 0, 0.01)
204
- normal_init(self.fc_cls, 0, 0.01)
205
-
206
- def cls_forward(self, cls_x):
207
- cls_x = cls_x.view(cls_x.size(0), -1)
208
- for fc in self.cls_fcs:
209
- cls_x = self.relu(fc(cls_x))
210
- cls_score = self.fc_cls(cls_x)
211
- return cls_score
212
-
213
- def attention_pool(self, reg_x):
214
- """Extract direction-specific features fx and fy with attention
215
- mechanism."""
216
- reg_fx = reg_x
217
- reg_fy = reg_x
218
- reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
219
- reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
220
- reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
221
- reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
222
- reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
223
- reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
224
- return reg_fx, reg_fy
225
-
226
- def side_aware_feature_extractor(self, reg_x):
227
- """Refine and extract side-aware features without split them."""
228
- for reg_pre_conv in self.reg_pre_convs:
229
- reg_x = reg_pre_conv(reg_x)
230
- reg_fx, reg_fy = self.attention_pool(reg_x)
231
-
232
- if self.reg_post_num > 0:
233
- reg_fx = reg_fx.unsqueeze(2)
234
- reg_fy = reg_fy.unsqueeze(3)
235
- for i in range(self.reg_post_num):
236
- reg_fx = self.reg_post_conv_xs[i](reg_fx)
237
- reg_fy = self.reg_post_conv_ys[i](reg_fy)
238
- reg_fx = reg_fx.squeeze(2)
239
- reg_fy = reg_fy.squeeze(3)
240
- if self.reg_feat_up_ratio > 1:
241
- reg_fx = self.relu(self.upsample_x(reg_fx))
242
- reg_fy = self.relu(self.upsample_y(reg_fy))
243
- reg_fx = torch.transpose(reg_fx, 1, 2)
244
- reg_fy = torch.transpose(reg_fy, 1, 2)
245
- return reg_fx.contiguous(), reg_fy.contiguous()
246
-
247
- def reg_pred(self, x, offset_fcs, cls_fcs):
248
- """Predict bucketing estimation (cls_pred) and fine regression (offset
249
- pred) with side-aware features."""
250
- x_offset = x.view(-1, self.reg_in_channels)
251
- x_cls = x.view(-1, self.reg_in_channels)
252
-
253
- for fc in offset_fcs:
254
- x_offset = self.relu(fc(x_offset))
255
- for fc in cls_fcs:
256
- x_cls = self.relu(fc(x_cls))
257
- offset_pred = self.fc_reg_offset(x_offset)
258
- cls_pred = self.fc_reg_cls(x_cls)
259
-
260
- offset_pred = offset_pred.view(x.size(0), -1)
261
- cls_pred = cls_pred.view(x.size(0), -1)
262
-
263
- return offset_pred, cls_pred
264
-
265
- def side_aware_split(self, feat):
266
- """Split side-aware features aligned with orders of bucketing
267
- targets."""
268
- l_end = int(np.ceil(self.up_reg_feat_size / 2))
269
- r_start = int(np.floor(self.up_reg_feat_size / 2))
270
- feat_fl = feat[:, :l_end]
271
- feat_fr = feat[:, r_start:].flip(dims=(1, ))
272
- feat_fl = feat_fl.contiguous()
273
- feat_fr = feat_fr.contiguous()
274
- feat = torch.cat([feat_fl, feat_fr], dim=-1)
275
- return feat
276
-
277
- def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
278
- """Split batch bbox prediction back to each image."""
279
- bucket_cls_preds, bucket_offset_preds = bbox_pred
280
- bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
281
- bucket_offset_preds = bucket_offset_preds.split(
282
- num_proposals_per_img, 0)
283
- bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
284
- return bbox_pred
285
-
286
- def reg_forward(self, reg_x):
287
- outs = self.side_aware_feature_extractor(reg_x)
288
- edge_offset_preds = []
289
- edge_cls_preds = []
290
- reg_fx = outs[0]
291
- reg_fy = outs[1]
292
- offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
293
- self.reg_cls_fcs)
294
- offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
295
- self.reg_cls_fcs)
296
- offset_pred_x = self.side_aware_split(offset_pred_x)
297
- offset_pred_y = self.side_aware_split(offset_pred_y)
298
- cls_pred_x = self.side_aware_split(cls_pred_x)
299
- cls_pred_y = self.side_aware_split(cls_pred_y)
300
- edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
301
- edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)
302
-
303
- return (edge_cls_preds, edge_offset_preds)
304
-
305
- def forward(self, x):
306
-
307
- bbox_pred = self.reg_forward(x)
308
- cls_score = self.cls_forward(x)
309
-
310
- return cls_score, bbox_pred
311
-
312
- def get_targets(self, sampling_results, gt_bboxes, gt_labels,
313
- rcnn_train_cfg):
314
- pos_proposals = [res.pos_bboxes for res in sampling_results]
315
- neg_proposals = [res.neg_bboxes for res in sampling_results]
316
- pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
317
- pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
318
- cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,
319
- pos_gt_bboxes, pos_gt_labels,
320
- rcnn_train_cfg)
321
- (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
322
- bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
323
- return (labels, label_weights, (bucket_cls_targets,
324
- bucket_offset_targets),
325
- (bucket_cls_weights, bucket_offset_weights))
326
-
327
- def bucket_target(self,
328
- pos_proposals_list,
329
- neg_proposals_list,
330
- pos_gt_bboxes_list,
331
- pos_gt_labels_list,
332
- rcnn_train_cfg,
333
- concat=True):
334
- (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
335
- bucket_offset_targets, bucket_offset_weights) = multi_apply(
336
- self._bucket_target_single,
337
- pos_proposals_list,
338
- neg_proposals_list,
339
- pos_gt_bboxes_list,
340
- pos_gt_labels_list,
341
- cfg=rcnn_train_cfg)
342
-
343
- if concat:
344
- labels = torch.cat(labels, 0)
345
- label_weights = torch.cat(label_weights, 0)
346
- bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
347
- bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
348
- bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
349
- bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
350
- return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
351
- bucket_offset_targets, bucket_offset_weights)
352
-
353
- def _bucket_target_single(self, pos_proposals, neg_proposals,
354
- pos_gt_bboxes, pos_gt_labels, cfg):
355
- """Compute bucketing estimation targets and fine regression targets for
356
- a single image.
357
-
358
- Args:
359
- pos_proposals (Tensor): positive proposals of a single image,
360
- Shape (n_pos, 4)
361
- neg_proposals (Tensor): negative proposals of a single image,
362
- Shape (n_neg, 4).
363
- pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
364
- of a single image, Shape (n_pos, 4).
365
- pos_gt_labels (Tensor): gt labels assigned to positive proposals
366
- of a single image, Shape (n_pos, ).
367
- cfg (dict): Config of calculating targets
368
-
369
- Returns:
370
- tuple:
371
-
372
- - labels (Tensor): Labels in a single image. \
373
- Shape (n,).
374
- - label_weights (Tensor): Label weights in a single image.\
375
- Shape (n,)
376
- - bucket_cls_targets (Tensor): Bucket cls targets in \
377
- a single image. Shape (n, num_buckets*2).
378
- - bucket_cls_weights (Tensor): Bucket cls weights in \
379
- a single image. Shape (n, num_buckets*2).
380
- - bucket_offset_targets (Tensor): Bucket offset targets \
381
- in a single image. Shape (n, num_buckets*2).
382
- - bucket_offset_weights (Tensor): Bucket offset weights \
383
- in a single image. Shape (n, num_buckets*2).
384
- """
385
- num_pos = pos_proposals.size(0)
386
- num_neg = neg_proposals.size(0)
387
- num_samples = num_pos + num_neg
388
- labels = pos_gt_bboxes.new_full((num_samples, ),
389
- self.num_classes,
390
- dtype=torch.long)
391
- label_weights = pos_proposals.new_zeros(num_samples)
392
- bucket_cls_targets = pos_proposals.new_zeros(num_samples,
393
- 4 * self.side_num)
394
- bucket_cls_weights = pos_proposals.new_zeros(num_samples,
395
- 4 * self.side_num)
396
- bucket_offset_targets = pos_proposals.new_zeros(
397
- num_samples, 4 * self.side_num)
398
- bucket_offset_weights = pos_proposals.new_zeros(
399
- num_samples, 4 * self.side_num)
400
- if num_pos > 0:
401
- labels[:num_pos] = pos_gt_labels
402
- label_weights[:num_pos] = 1.0
403
- (pos_bucket_offset_targets, pos_bucket_offset_weights,
404
- pos_bucket_cls_targets,
405
- pos_bucket_cls_weights) = self.bbox_coder.encode(
406
- pos_proposals, pos_gt_bboxes)
407
- bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets
408
- bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights
409
- bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets
410
- bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights
411
- if num_neg > 0:
412
- label_weights[-num_neg:] = 1.0
413
- return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
414
- bucket_offset_targets, bucket_offset_weights)
415
-
416
- def loss(self,
417
- cls_score,
418
- bbox_pred,
419
- rois,
420
- labels,
421
- label_weights,
422
- bbox_targets,
423
- bbox_weights,
424
- reduction_override=None):
425
- losses = dict()
426
- if cls_score is not None:
427
- avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
428
- losses['loss_cls'] = self.loss_cls(
429
- cls_score,
430
- labels,
431
- label_weights,
432
- avg_factor=avg_factor,
433
- reduction_override=reduction_override)
434
- losses['acc'] = accuracy(cls_score, labels)
435
-
436
- if bbox_pred is not None:
437
- bucket_cls_preds, bucket_offset_preds = bbox_pred
438
- bucket_cls_targets, bucket_offset_targets = bbox_targets
439
- bucket_cls_weights, bucket_offset_weights = bbox_weights
440
- # edge cls
441
- bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)
442
- bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)
443
- bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)
444
- losses['loss_bbox_cls'] = self.loss_bbox_cls(
445
- bucket_cls_preds,
446
- bucket_cls_targets,
447
- bucket_cls_weights,
448
- avg_factor=bucket_cls_targets.size(0),
449
- reduction_override=reduction_override)
450
-
451
- losses['loss_bbox_reg'] = self.loss_bbox_reg(
452
- bucket_offset_preds,
453
- bucket_offset_targets,
454
- bucket_offset_weights,
455
- avg_factor=bucket_offset_targets.size(0),
456
- reduction_override=reduction_override)
457
-
458
- return losses
459
-
460
- @force_fp32(apply_to=('cls_score', 'bbox_pred'))
461
- def get_bboxes(self,
462
- rois,
463
- cls_score,
464
- bbox_pred,
465
- img_shape,
466
- scale_factor,
467
- rescale=False,
468
- cfg=None):
469
- if isinstance(cls_score, list):
470
- cls_score = sum(cls_score) / float(len(cls_score))
471
- scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
472
-
473
- if bbox_pred is not None:
474
- bboxes, confids = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
475
- img_shape)
476
- else:
477
- bboxes = rois[:, 1:].clone()
478
- confids = None
479
- if img_shape is not None:
480
- bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
481
- bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
482
-
483
- if rescale and bboxes.size(0) > 0:
484
- if isinstance(scale_factor, float):
485
- bboxes /= scale_factor
486
- else:
487
- bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)
488
-
489
- if cfg is None:
490
- return bboxes, scores
491
- else:
492
- det_bboxes, det_labels = multiclass_nms(
493
- bboxes,
494
- scores,
495
- cfg.score_thr,
496
- cfg.nms,
497
- cfg.max_per_img,
498
- score_factors=confids)
499
-
500
- return det_bboxes, det_labels
501
-
502
- @force_fp32(apply_to=('bbox_preds', ))
503
- def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
504
- """Refine bboxes during training.
505
-
506
- Args:
507
- rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
508
- and bs is the sampled RoIs per image.
509
- labels (Tensor): Shape (n*bs, ).
510
- bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \
511
- (n*bs, num_buckets*2)].
512
- pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
513
- is a gt bbox.
514
- img_metas (list[dict]): Meta info of each image.
515
-
516
- Returns:
517
- list[Tensor]: Refined bboxes of each image in a mini-batch.
518
- """
519
- img_ids = rois[:, 0].long().unique(sorted=True)
520
- assert img_ids.numel() == len(img_metas)
521
-
522
- bboxes_list = []
523
- for i in range(len(img_metas)):
524
- inds = torch.nonzero(
525
- rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
526
- num_rois = inds.numel()
527
-
528
- bboxes_ = rois[inds, 1:]
529
- label_ = labels[inds]
530
- edge_cls_preds, edge_offset_preds = bbox_preds
531
- edge_cls_preds_ = edge_cls_preds[inds]
532
- edge_offset_preds_ = edge_offset_preds[inds]
533
- bbox_pred_ = [edge_cls_preds_, edge_offset_preds_]
534
- img_meta_ = img_metas[i]
535
- pos_is_gts_ = pos_is_gts[i]
536
-
537
- bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
538
- img_meta_)
539
- # filter gt bboxes
540
- pos_keep = 1 - pos_is_gts_
541
- keep_inds = pos_is_gts_.new_ones(num_rois)
542
- keep_inds[:len(pos_is_gts_)] = pos_keep
543
-
544
- bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
545
-
546
- return bboxes_list
547
-
548
- @force_fp32(apply_to=('bbox_pred', ))
549
- def regress_by_class(self, rois, label, bbox_pred, img_meta):
550
- """Regress the bbox for the predicted class. Used in Cascade R-CNN.
551
-
552
- Args:
553
- rois (Tensor): shape (n, 4) or (n, 5)
554
- label (Tensor): shape (n, )
555
- bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \
556
- (n, num_buckets *2)]
557
- img_meta (dict): Image meta info.
558
-
559
- Returns:
560
- Tensor: Regressed bboxes, the same shape as input rois.
561
- """
562
- assert rois.size(1) == 4 or rois.size(1) == 5
563
-
564
- if rois.size(1) == 4:
565
- new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,
566
- img_meta['img_shape'])
567
- else:
568
- bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
569
- img_meta['img_shape'])
570
- new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
571
-
572
- return new_rois
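For orientation, a minimal sketch of driving this head with random RoI features; the import path mirrors the deleted file's location and assumes a matching mmdet install:

import torch
from mmdet.models.roi_heads.bbox_heads.sabl_head import SABLHead  # assumed path

head = SABLHead(num_classes=80)   # defaults: 256-channel, 7x7 RoI features, 14 buckets
head.init_weights()
feats = torch.rand(4, 256, 7, 7)  # 4 sampled RoIs
cls_score, (bucket_cls, bucket_offset) = head(feats)
print(cls_score.shape)    # torch.Size([4, 81]): 80 classes + background
print(bucket_cls.shape)   # torch.Size([4, 28]): 4 sides * ceil(14 / 2) buckets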
 
spaces/Carlosito16/HXM-summarization/app.py DELETED
@@ -1,321 +0,0 @@
1
- print("Libraries installed succesfully!!!!")
2
-
3
- import torch
4
- from transformers import RobertaTokenizerFast, EncoderDecoderModel, AutoTokenizer, AutoModelForSeq2SeqLM
5
- import re
6
- import gradio as gr
7
-
8
- from helper_function import *
9
-
10
- import numpy as np
11
- from sklearn.feature_extraction.text import CountVectorizer
12
-
13
- import spacy
14
- from sentence_transformers import SentenceTransformer
15
- from sklearn.metrics.pairwise import cosine_similarity
16
-
17
- import requests
18
- from pathlib import Path
19
-
20
- print('Libraries called successfully!!!!')
21
-
22
-
23
-
24
- class keyWordExtractor():
25
-
26
- def __init__(self,
27
- article_text,
28
- similarity_model,
29
- n_gram = 1,
30
- top_n = 3,
31
- french_stopwords = None,
32
- ner= None,
33
- ):
34
- self.article_text = article_text
35
- self.french_stopwords = french_stopwords
36
- self.candidates = self.count_vectorizer(n_gram)
37
- self.noun_candidates, self.proper_noun_candidates = self.slice_only_noun_token(ner, self.candidates)
38
- self.top_n_keywords = self.top_n_extractor(similarity_model, top_n)
39
-
40
- def count_vectorizer(self, n_gram):
41
- n_gram_range = (n_gram, n_gram)
42
- # Extract candidate words/phrases
43
- count = CountVectorizer(ngram_range=n_gram_range,
44
- stop_words = self.french_stopwords).fit([self.article_text]) #Main change
45
- candidates = count.get_feature_names_out()
46
-
47
- return candidates
48
-
49
- def slice_only_noun_token(self, ner, token_list):
50
- """
51
- Given the tokenized list, this function returns only the "NOUN" token
52
- Args:
53
- ner (spacy): The NER class to detect the `token.pos_`
54
- token_list (list): List of token from the full article
55
- Returns:
56
- slice_list (list): List of token containing only "NOUN" part of speech
57
- """
58
-
59
- noun_slice_list = []
60
- proper_noun_slice_list = []
61
- for word_idx in range(len(token_list)):
62
- doc = ner(token_list[word_idx])
63
-
64
- for token in doc:
65
- if token.pos_ == 'NOUN':
66
- noun_slice_list.append(token.text)
67
- elif token.pos_ == 'PROPN':
68
- proper_noun_slice_list.append(token.text)
69
-
70
- return noun_slice_list, proper_noun_slice_list
71
-
72
- def top_n_extractor(self, model, top_n):
73
- doc_embedding = model.encode([self.article_text])
74
- candidate_embeddings = model.encode(self.noun_candidates)
75
- distances = cosine_similarity(doc_embedding, candidate_embeddings)
76
- keywords = [self.noun_candidates[index] for index in distances.argsort()[0][-top_n:]]
77
-
78
- return keywords
79
-
80
- def clear_input():
81
- return ("", "")
82
-
83
-
84
- def camembert_generate_summary(article_text):
85
- inputs = cmb_tokenizer([article_text], padding="max_length", truncation=True,
86
- max_length=50,
87
- return_tensors="pt")
88
- input_ids = inputs.input_ids.to(device)
89
- attention_mask = inputs.attention_mask.to(device)
90
- output = cmb_model.generate(input_ids, attention_mask=attention_mask, max_length = 50)
91
- return cmb_tokenizer.decode(output[0], skip_special_tokens=True)
92
-
93
-
94
- def t5_generate_summary(article_text):
95
- input_ids = t5_tokenizer(
96
- [WHITESPACE_HANDLER(article_text)],
97
- return_tensors="pt",
98
- padding="max_length",
99
- truncation=True,
100
- max_length=512)["input_ids"]
101
-
102
- output_ids = t5_model.generate(
103
- input_ids=input_ids,
104
- max_length=84,
105
- no_repeat_ngram_size=2,
106
- num_beams=4
107
- )[0]
108
-
109
- output = t5_tokenizer.decode(
110
- output_ids,
111
- skip_special_tokens=True,
112
- clean_up_tokenization_spaces=False
113
- )
114
-
115
- return output
116
-
117
-
118
- def summarizer(dropdown_model, article_text):
119
- """
120
- Returns a summarized version of the full article based on the selected pretrained model
121
- """
122
-
123
- if dropdown_model == 'camembert':
124
- summary = camembert_generate_summary(article_text)
125
-
126
- elif dropdown_model == 'T5':
127
- summary = t5_generate_summary(article_text)
128
-
129
- return summary
130
-
131
- def extract_top_3(article):
132
- nlp = spacy.load("fr_core_news_md")
133
- # model = SentenceTransformer("dangvantuan/sentence-camembert-large") #
134
-
135
- a= keyWordExtractor(article,
136
- n_gram = 1,
137
- top_n = 3,
138
- ner = nlp,
139
- similarity_model = model)
140
- keyword = ", ".join(a.top_n_keywords) #to return ['a' , 'b'] >> "a, b"
141
- proper_nouns = ", ".join(a.proper_noun_candidates)
142
-
143
- return keyword, proper_nouns
144
-
145
-
146
- def runall(dropdown_model, article_text):
147
- summary = summarizer(dropdown_model, article_text)
148
- keywords, proper_n = extract_top_3(article_text)
149
-
150
- return summary, keywords, proper_n
151
-
152
-
153
-
154
- #set the device agnostics code
155
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
156
- test_article ="""\"Un nuage de fumée juste après l’explosion, le 1er juin 2019. Une déflagration dans une importante usine d’explosifs du centre de la Russie a fait au moins 79 blessés samedi 1er juin. L’explosion a eu lieu dans l’usine Kristall à Dzerzhinsk, une ville située à environ 400 kilomètres à l’est de Moscou, dans la région de Nijni-Novgorod. « Il y a eu une explosion technique dans l’un des ateliers, suivie d’un incendie qui s’est propagé sur une centaine de mètres carrés », a expliqué un porte-parole des services d’urgence. Des images circulant sur les réseaux sociaux montraient un énorme nuage de fumée après l’explosion. Cinq bâtiments de l’usine et près de 180 bâtiments résidentiels ont été endommagés par l’explosion, selon les autorités municipales. Une enquête pour de potentielles violations des normes de sécurité a été ouverte. Fragments de shrapnel Les blessés ont été soignés après avoir été atteints par des fragments issus de l’explosion, a précisé une porte-parole des autorités sanitaires citée par Interfax. « Nous parlons de blessures par shrapnel d’une gravité moyenne et modérée », a-t-elle précisé. Selon des représentants de Kristall, cinq personnes travaillaient dans la zone où s’est produite l’explosion. Elles ont pu être évacuées en sécurité. Les pompiers locaux ont rapporté n’avoir aucune information sur des personnes qui se trouveraient encore dans l’usine."""
157
-
158
- # whitespace handler to be used in `t5 model`
159
- WHITESPACE_HANDLER = lambda k: re.sub('\s+', ' ', re.sub('\n+', ' ', k.strip()))
160
-
161
- article_text = """\"Un nuage de fumée juste après l’explosion, le 1er juin 2019. Une déflagration dans une importante usine d’explosifs du centre de la Russie a fait au moins 79 blessés samedi 1er juin. L’explosion a eu lieu dans l’usine Kristall à Dzerzhinsk, une ville située à environ 400 kilomètres à l’est de Moscou, dans la région de Nijni-Novgorod. « Il y a eu une explosion technique dans l’un des ateliers, suivie d’un incendie qui s’est propagé sur une centaine de mètres carrés », a expliqué un porte-parole des services d’urgence. Des images circulant sur les réseaux sociaux montraient un énorme nuage de fumée après l’explosion. Cinq bâtiments de l’usine et près de 180 bâtiments résidentiels ont été endommagés par l’explosion, selon les autorités municipales. Une enquête pour de potentielles violations des normes de sécurité a été ouverte. Fragments de shrapnel Les blessés ont été soignés après avoir été atteints par des fragments issus de l’explosion, a précisé une porte-parole des autorités sanitaires citée par Interfax. « Nous parlons de blessures par shrapnel d’une gravité moyenne et modérée », a-t-elle précisé. Selon des représentants de Kristall, cinq personnes travaillaient dans la zone où s’est produite l’explosion. Elles ont pu être évacuées en sécurité. Les pompiers locaux ont rapporté n’avoir aucune information sur des personnes qui se trouveraient encore dans l’usine."""
162
-
163
-
164
- cmb_ckpt = 'mrm8488/camembert2camembert_shared-finetuned-french-summarization'
165
- cmb_tokenizer = RobertaTokenizerFast.from_pretrained(cmb_ckpt)
166
- cmb_model = EncoderDecoderModel.from_pretrained(cmb_ckpt).to(device)
167
-
168
-
169
- t5_model_name = "csebuetnlp/mT5_multilingual_XLSum"
170
- t5_tokenizer = AutoTokenizer.from_pretrained(t5_model_name)
171
- t5_model = AutoModelForSeq2SeqLM.from_pretrained(t5_model_name)
172
-
173
- print(summarizer("camembert", test_article))
174
-
175
-
176
- ###
177
- import requests
178
- from pathlib import Path
179
-
180
- if Path('french_stopword.txt').is_file():
181
- print('already exists')
182
- else:
183
- print('not present yet, downloading')
184
- request = requests.get("https://raw.githubusercontent.com/stopwords-iso/stopwords-fr/master/stopwords-fr.txt")
185
- with open('french_stopword.txt', "wb") as f:
186
- f.write(request.content)
187
-
188
-
189
- # opening the file in read mode
190
- my_file = open("french_stopword.txt", "r")
191
-
192
- # reading the file
193
- data = my_file.read()
194
-
195
- # replace newlines ('\n') with spaces and
196
- # split on spaces to build the stopword list
197
- french_stopwords_list = data.replace('\n', ' ').split(" ")
198
-
199
- # # printing the data
200
- print(french_stopwords_list)
201
- my_file.close()
202
-
203
- ###
204
-
205
- nlp = spacy.load("fr_core_news_md")
206
- model = SentenceTransformer("dangvantuan/sentence-camembert-large")
207
-
208
- print(extract_top_3(test_article))
209
-
210
- some_examples = [
211
-
212
- ["camembert", """
213
- Carrefour livraison service honteux ! Ne livre jamais dans les temps, plusieurs heures de retard et si on a le malheur de travailler et de ne pas être chez nous toute la journée on peut dire au revoir à notre commande !
214
- Service client délocalisé qui insulte et raccroche au nez quand on demande ou est notre commande !
215
- Je ne recommande absolument pas, j'ai pourtant laisser plusieurs chance mais à chaque fois extrêmement déçu...
216
- """],
217
- ["camembert", """
218
- Très mauvaise expérience !!!
219
- Commande livré par terre sans rien dire alors que nous étions présents.
220
- En plein centre ville de Dijon.
221
- Merci pour l’hygiène et la compétence du livreur.
222
- Sans parler des surgelés en plein soleil qui ont finis à la poubelle.
223
- Merci Carrefour Quetigny.
224
- """]
225
-
226
- ]
227
-
228
-
229
-
230
- ###
231
-
232
-
233
-
234
- ####
235
-
236
- with gr.Blocks() as demo:
237
- gr.Markdown("Summarize the article text.")
238
-
239
- with gr.Tab("Summarization + Extraction"):
240
- with gr.Row():
241
- with gr.Column():
242
- input_models = gr.Dropdown(['camembert'], value = 'camembert')
243
- input_article = gr.TextArea(label = 'Article to be summarized')
244
-
245
- with gr.Column():
246
- summarized_output = gr.TextArea(label= 'Generated summary')
247
- output_keyword = gr.Textbox(label="Top 3 keywords")
248
- output_proper_nouns = gr.Textbox(label="Proper Nouns")
249
-
250
- with gr.Row():
251
- clear_button = gr.Button("Clear")
252
- summarize_button = gr.Button("Summarize!")
253
- extract_button = gr.Button("Extract!")
254
- run_button = gr.Button("Run all!")
255
-
256
- examples = gr.Examples(examples= some_examples,
257
- inputs=[input_models, input_article])
258
-
259
-
260
- summarize_button.click(summarizer,
261
- inputs = [input_models, input_article] ,
262
- outputs = summarized_output)
263
-
264
-
265
- extract_button.click(extract_top_3,
266
- inputs = input_article ,
267
- outputs = [output_keyword, output_proper_nouns])
268
-
269
- run_button.click(runall,
270
- inputs = [input_models, input_article],
271
- outputs = [summarized_output, output_keyword, output_proper_nouns ])
272
-
273
- clear_button.click(clear_input,
274
- outputs = [input_models, input_article])
275
-
276
- with gr.Tab("Summarization"):
277
- with gr.Row():
278
- with gr.Column():
279
- input_models = gr.Dropdown(['camembert', 'T5'])
280
- input_article = gr.TextArea(label = 'Article to be summarized')
281
- with gr.Column():
282
- summarized_output = gr.TextArea(label= 'Generated summary')
283
- with gr.Row():
284
-
285
- clear_button = gr.Button("Clear")
286
- summarize_button = gr.Button("Summarize!")
287
-
288
- summarize_button.click(summarizer,
289
- inputs = [input_models, input_article] ,
290
- outputs = summarized_output)
291
-
292
- clear_button.click(clear_input,
293
- outputs = [input_models, input_article])
294
-
295
- example = "Un nuage de fumée juste après l’explosion, le 1er juin 2019. Une déflagration dans une importante usine d’explosifs du centre de la Russie a fait au moins 79 blessés samedi 1er juin. L’explosion a eu lieu dans l’usine Kristall à Dzerzhinsk, une ville située à environ 400 kilomètres à l’est de Moscou, dans la région de Nijni-Novgorod. « Il y a eu une explosion technique dans l’un des ateliers, suivie d’un incendie qui s’est propagé sur une centaine de mètres carrés », a expliqué un porte-parole des services d’urgence. Des images circulant sur les réseaux sociaux montraient un énorme nuage de fumée après l’explosion. Cinq bâtiments de l’usine et près de 180 bâtiments résidentiels ont été endommagés par l’explosion, selon les autorités municipales. Une enquête pour de potentielles violations des normes de sécurité a été ouverte. Fragments de shrapnel Les blessés ont été soignés après avoir été atteints par des fragments issus de l’explosion, a précisé une porte-parole des autorités sanitaires citée par Interfax. « Nous parlons de blessures par shrapnel d’une gravité moyenne et modérée », a-t-elle précisé. Selon des représentants de Kristall, cinq personnes travaillaient dans la zone où s’est produite l’explosion. Elles ont pu être évacuées en sécurité. Les pompiers locaux ont rapporté n’avoir aucune information sur des personnes qui se trouveraient encore dans l’usine."
296
- examples = gr.Examples(examples=[ ["camembert",example],
297
- ["T5",example]],
298
- inputs=[input_models, input_article])
299
-
300
- with gr.Tab("Key word extraction"):
301
-
302
-
303
- with gr.Row():
304
- with gr.Column():
305
- input_article = gr.TextArea(label = 'Article to be extracted')
306
- with gr.Column():
307
- output_1 = gr.Textbox(label="Top 3 keywords")
308
- output_2 = gr.Textbox(label="Proper Nouns")
309
-
310
- with gr.Row():
311
-
312
- clear_button = gr.Button("Clear")
313
- extract_button = gr.Button("Extract!")
314
-
315
- extract_button.click(extract_top_3,
316
- inputs = input_article ,
317
- outputs = [output_1, output_2])
318
-
319
-
320
-
321
- demo.launch(debug=True)
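For reference, a standalone sketch of the keyWordExtractor class defined above, reusing the same model names the app itself loads (their availability on the host is an assumption):

import spacy
from sentence_transformers import SentenceTransformer

nlp = spacy.load("fr_core_news_md")
st_model = SentenceTransformer("dangvantuan/sentence-camembert-large")
extractor = keyWordExtractor(test_article, similarity_model=st_model,
                             top_n=3, ner=nlp,
                             french_stopwords=french_stopwords_list)
print(extractor.top_n_keywords)          # the three nouns closest to the document
print(extractor.proper_noun_candidates)  # proper nouns found among the candidates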
spaces/ChandraMohanNayal/AutoGPT/tests/smoke_test.py DELETED
@@ -1,59 +0,0 @@
1
- """Smoke test for the autogpt package."""
2
- import os
3
- import subprocess
4
- import sys
5
-
6
- import pytest
7
-
8
- from autogpt.commands.file_operations import delete_file, read_file
9
-
10
-
11
- @pytest.mark.integration_test
12
- def test_write_file() -> None:
13
- """
14
- Test case to check if the write_file command can successfully write 'Hello World' to a file
15
- named 'hello_world.txt'.
16
-
17
- The test backs up any existing ai_settings.yaml file and restores it afterwards.
18
- """
19
- env_vars = {"MEMORY_BACKEND": "no_memory", "TEMPERATURE": "0"}
20
- ai_settings = None
21
- if os.path.exists("ai_settings.yaml"):
22
- with open("ai_settings.yaml", "r") as f:
23
- ai_settings = f.read()
24
- os.remove("ai_settings.yaml")
25
-
26
- try:
27
- if os.path.exists("hello_world.txt"):
28
- # Clean up any existing 'hello_world.txt' file before testing.
29
- delete_file("hello_world.txt")
30
- # Prepare input data for the test.
31
- input_data = """write_file-GPT
32
- an AI designed to use the write_file command to write 'Hello World' into a file named "hello_world.txt" and then use the task_complete command to complete the task.
33
- Use the write_file command to write 'Hello World' into a file named "hello_world.txt".
34
- Use the task_complete command to complete the task.
35
- Do not use any other commands.
36
-
37
- y -5
38
- EOF"""
39
- command = f"{sys.executable} -m autogpt"
40
-
41
- # Execute the script with the input data.
42
- process = subprocess.Popen(
43
- command,
44
- stdin=subprocess.PIPE,
45
- shell=True,
46
- env={**os.environ, **env_vars},
47
- )
48
- process.communicate(input_data.encode())
49
-
50
- # Read the content of the 'hello_world.txt' file created during the test.
51
- content = read_file("hello_world.txt")
52
- finally:
53
- if ai_settings:
54
- # Restore the original ai_settings.yaml file.
55
- with open("ai_settings.yaml", "w") as f:
56
- f.write(ai_settings)
57
-
58
- # Check if the content of the 'hello_world.txt' file is equal to 'Hello World'.
59
- assert content == "Hello World", f"Expected 'Hello World', got {content}"
spaces/CikeyQI/meme-api/meme_generator/memes/distracted/__init__.py DELETED
@@ -1,23 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from pil_utils import BuildImage
5
-
6
- from meme_generator import add_meme
7
- from meme_generator.utils import make_jpg_or_gif
8
-
9
- img_dir = Path(__file__).parent / "images"
10
-
11
-
12
- def distracted(images: List[BuildImage], texts, args):
13
- frame = BuildImage.open(img_dir / "1.png")
14
- label = BuildImage.open(img_dir / "0.png")
15
-
16
- def make(img: BuildImage) -> BuildImage:
17
- img = img.convert("RGBA").square().resize((500, 500))
18
- return frame.copy().paste(img, below=True).paste(label, (140, 320), alpha=True)
19
-
20
- return make_jpg_or_gif(images[0], make)
21
-
22
-
23
- add_meme("distracted", distracted, min_images=1, max_images=1, keywords=["注意力涣散"])