Commit 8c43e22
1 Parent(s): 0fec13f

Update parquet files (step 41 of 249)

This view is limited to 50 files because the commit contains too many changes.
- spaces/0x90e/ESRGAN-MANGA/inference_manga_v2.py +0 -46
- spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/__init__.py +0 -127
- spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/README.md +0 -2
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Create Download and Print 3D Maps with 3D Map Generator for Free.md +0 -44
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fastgsm S3g 1.0.0.42 Free Download ((NEW)).md +0 -187
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/FinalMesh Professional 2.4.2.331 Crack UPD Downloadl.md +0 -21
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gta 3 Weather Cheat Pc EXCLUSIVE.md +0 -26
- spaces/1gistliPinn/ChatGPT4/Examples/Download Aplikasi untuk Buat Undangan Pernikahan yang Bisa Dibagikan ke Media Sosial di Wedding Invitation Card Maker.md +0 -29
- spaces/1gistliPinn/ChatGPT4/Examples/Driver Tv Tunner Gadmei Usb Utv330 .rar [NEW].md +0 -48
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Highway Racing APK How to Get Unlimited Money and Dominate the Road.md +0 -84
- spaces/1phancelerku/anime-remove-background/Alienvault The Ultimate Solution for Threat Intelligence and Detection.md +0 -141
- spaces/1phancelerku/anime-remove-background/Download TikTok Videos Without Watermark in HD Resolution - Best TikTok Saver.md +0 -113
- spaces/1phancelerku/anime-remove-background/Download WhatsApp Business APK Terbaru Aplikasi Gratis untuk Bisnis Kecil.md +0 -101
- spaces/2023Liu2023/bingo/src/components/chat-notification.tsx +0 -77
- spaces/7hao/bingo/src/lib/storage.ts +0 -27
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/diff/diffusion.py +0 -334
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/Methods.js +0 -18
- spaces/AlekseyKorshuk/gai-project/modules/about.py +0 -17
- spaces/Aloento/9Nine-VITS/transforms.py +0 -191
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/loaders.md +0 -45
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_dance_diffusion_to_diffusers.py +0 -339
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +0 -713
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py +0 -594
- spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py +0 -37
- spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py +0 -58
- spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py +0 -91
- spaces/Andy1621/uniformer_image_detection/tools/dataset_converters/pascal_voc.py +0 -236
- spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py +0 -6
- spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py +0 -4
- spaces/AnimeStudio/anime-models/README.md +0 -13
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/lr_updater.py +0 -670
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/log_buffer.py +0 -41
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/__init__.py +0 -19
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/__init__.py +0 -2
- spaces/Audio-AGI/AudioSep/pipeline.py +0 -67
- spaces/BMukhtar/facemaskDetector/README.md +0 -13
- spaces/Bart92/RVC_HF/infer/modules/ipex/__init__.py.py +0 -165
- spaces/Benson/text-generation/Examples/Apk Mod 8 Piscina De Bolas 5.11.2.md +0 -151
- spaces/Benson/text-generation/Examples/Descargar 28 Semanas Despus.md +0 -78
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_nms_rotated.py +0 -159
- spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubBuildCompilerTargets.cmake +0 -102
- spaces/CVPR/MonoScene/monoscene/__init__.py +0 -0
- spaces/CVPR/WALT/mmdet/models/detectors/cascade_rcnn.py +0 -46
- spaces/CVPR/WALT/mmdet/models/necks/fpg.py +0 -398
- spaces/Celestinian/Prompt-Generator/app.py +0 -34
- spaces/CguCsie/README/README.md +0 -11
- spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/__init__.py +0 -19
- spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/common/__init__.py +0 -0
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/MacRoman.py +0 -258
spaces/0x90e/ESRGAN-MANGA/inference_manga_v2.py
DELETED
@@ -1,46 +0,0 @@
-import sys
-import cv2
-import numpy as np
-import torch
-import ESRGAN.architecture as arch
-from ESRGANer import ESRGANer
-
-def is_cuda():
-    if torch.cuda.is_available():
-        return True
-    else:
-        return False
-
-model_path = 'models/4x_eula_digimanga_bw_v2_nc1_307k.pth'
-OUTPUT_PATH = sys.argv[1]
-device = torch.device('cuda' if is_cuda() else 'cpu')
-
-model = arch.RRDB_Net(1, 1, 64, 23, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv')
-
-if is_cuda():
-    print("Using GPU 🥶")
-    model.load_state_dict(torch.load(model_path), strict=True)
-else:
-    print("Using CPU 😒")
-    model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')), strict=True)
-
-model.eval()
-
-for k, v in model.named_parameters():
-    v.requires_grad = False
-model = model.to(device)
-
-# Read image
-img = cv2.imread(OUTPUT_PATH, cv2.IMREAD_GRAYSCALE)
-img = img * 1.0 / 255
-img = torch.from_numpy(img[np.newaxis, :, :]).float()
-img_LR = img.unsqueeze(0)
-img_LR = img_LR.to(device)
-
-upsampler = ESRGANer(model=model)
-output = upsampler.enhance(img_LR)
-
-output = output.squeeze(dim=0).float().cpu().clamp_(0, 1).numpy()
-output = np.transpose(output, (1, 2, 0))
-output = (output * 255.0).round()
-cv2.imwrite(OUTPUT_PATH, output, [int(cv2.IMWRITE_PNG_COMPRESSION), 5])
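For readers skimming the deleted inference script above: it takes a single command-line argument that serves as both input and output path, so the upscaled PNG overwrites the original file. A minimal sketch of the tensor round-trip it performs around the model call (illustrative only, assuming NumPy and PyTorch; the zero array stands in for a real image):

import numpy as np
import torch

# Stand-in for cv2.imread(path, cv2.IMREAD_GRAYSCALE): an (H, W) uint8 array.
img = np.zeros((4, 4), dtype=np.uint8)

# Pre-processing from the deleted script: scale to [0, 1], add channel and batch dims.
t = torch.from_numpy(img[np.newaxis, :, :]).float() / 255.0  # shape (1, H, W)
img_LR = t.unsqueeze(0)                                      # shape (1, 1, H, W), the model input

# Post-processing (using the input as a stand-in for the model output):
out = img_LR.squeeze(dim=0).float().clamp_(0, 1).numpy()     # back to (1, H, W)
out = np.transpose(out, (1, 2, 0))                           # (H, W, 1), what cv2.imwrite expects
out = (out * 255.0).round().astype(np.uint8)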
spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/__init__.py
DELETED
@@ -1,127 +0,0 @@
-import json
-import re
-from typing import Optional, List, Dict, Any
-from uuid import uuid4
-
-from fake_useragent import UserAgent
-from pydantic import BaseModel
-from requests import RequestException
-from retrying import retry
-from tls_client import Session
-from tls_client.response import Response
-
-
-class YouResponse(BaseModel):
-    text: Optional[str] = None
-    links: List[str] = []
-    extra: Dict[str, Any] = {}
-
-
-class Completion:
-    @staticmethod
-    def create(
-        prompt: str,
-        page: int = 1,
-        count: int = 10,
-        safe_search: str = 'Moderate',
-        on_shopping_page: bool = False,
-        mkt: str = '',
-        response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
-        domain: str = 'youchat',
-        query_trace_id: str = None,
-        chat: list = None,
-        include_links: bool = False,
-        detailed: bool = False,
-        debug: bool = False,
-        proxy: Optional[str] = None,
-    ) -> YouResponse:
-        if chat is None:
-            chat = []
-
-        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else {}
-
-        client = Session(client_identifier='chrome_108')
-        client.headers = Completion.__get_headers()
-        client.proxies = proxies
-
-        params = {
-            'q': prompt,
-            'page': page,
-            'count': count,
-            'safeSearch': safe_search,
-            'onShoppingPage': on_shopping_page,
-            'mkt': mkt,
-            'responseFilter': response_filter,
-            'domain': domain,
-            'queryTraceId': str(uuid4()) if query_trace_id is None else query_trace_id,
-            'chat': str(chat), # {'question':'','answer':' ''}
-        }
-
-        try:
-            response = Completion.__make_request(client, params)
-        except Exception:
-            return Completion.__get_failure_response()
-
-        if debug:
-            print('\n\n------------------\n\n')
-            print(response.text)
-            print('\n\n------------------\n\n')
-
-        you_chat_serp_results = re.search(
-            r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text
-        ).group()
-        third_party_search_results = re.search(
-            r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text
-        ).group()
-        # slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0]
-
-        text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text))
-
-        extra = {
-            'youChatSerpResults': json.loads(you_chat_serp_results),
-            # 'slots' : loads(slots)
-        }
-
-        response = YouResponse(text=text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'))
-        if include_links:
-            response.links = json.loads(third_party_search_results)['search']['third_party_search_results']
-
-        if detailed:
-            response.extra = extra
-
-        return response
-
-    @staticmethod
-    def __get_headers() -> dict:
-        return {
-            'authority': 'you.com',
-            'accept': 'text/event-stream',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'cache-control': 'no-cache',
-            'referer': 'https://you.com/search?q=who+are+you&tbm=youchat',
-            'sec-ch-ua': '"Not_A Brand";v="99", "Google Chrome";v="109", "Chromium";v="109"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'cookie': f'safesearch_guest=Moderate; uuid_guest={str(uuid4())}',
-            'user-agent': UserAgent().random,
-        }
-
-    @staticmethod
-    def __get_failure_response() -> YouResponse:
-        return YouResponse(text='Unable to fetch the response, Please try again.')
-
-    @staticmethod
-    @retry(
-        wait_fixed=5000,
-        stop_max_attempt_number=5,
-        retry_on_exception=lambda e: isinstance(e, RequestException),
-    )
-    def __make_request(client: Session, params: dict) -> Response:
-        response = client.get(f'https://you.com/api/streamingSearch', params=params)
-        if 'youChatToken' not in response.text:
-            print('retry')
-            raise RequestException('Unable to get the response from server')
-        return response
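The core trick in the deleted you.com client above is that it never parses the server-sent-event stream properly; it pulls the answer out of the raw response body with regular expressions. A self-contained sketch of that extraction step (the toy body below mimics the event format the client assumes):

import re

# Toy response body in the SSE-like format the deleted client regex-parses.
body = (
    'event: youChatToken\ndata: {"youChatToken": "Hel"}\n\n'
    'event: youChatToken\ndata: {"youChatToken": "lo"}\n\n'
)

# Same pattern as the deleted code: collect every streamed token fragment in order.
text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', body))
print(text)  # -> Hello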
spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/README.md
DELETED
@@ -1,2 +0,0 @@
-to do:
-- code refractoring
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Create Download and Print 3D Maps with 3D Map Generator for Free.md
DELETED
@@ -1,44 +0,0 @@
-<br />
-<h1>How to Create Stunning 3D Maps for Free with 3D Map Generator</h1>
-
-<p>Have you ever wanted to create realistic 3D maps of any place on earth, without any special skills or software? Maybe you need a 3D map for a game, a presentation, a website, or a 3D print. Or maybe you just want to have fun and explore the world in 3D.</p>
-<h2>3d map generator terrain free download</h2><br /><p><b><b>Download File</b> ✺✺✺ <a href="https://byltly.com/2uKA4X">https://byltly.com/2uKA4X</a></b></p><br /><br />
-
-<p>If so, you're in luck. In this article, we'll show you how to use 3D Map Generator, a Photoshop plugin that lets you generate 3D maps from heightmaps. You can download it for free and use it to create amazing 3D maps in minutes.</p>
-
-<h2>What is 3D Map Generator?</h2>
-
-<p>3D Map Generator is a Photoshop plugin that allows you to create 3D maps from heightmaps. A heightmap is a grayscale image that represents the elevation of the terrain. The darker the pixel, the lower the elevation. The lighter the pixel, the higher the elevation.</p>
-
-<p>With 3D Map Generator, you can easily convert any heightmap into a 3D map with realistic textures, lighting, and shadows. You can also customize your map with various tools and settings, such as water level, snow cover, vegetation, roads, buildings, and more.</p>
-
-<p>3D Map Generator works with Photoshop CC-2014 and newer, on PC or Mac. You can download it for free from Graphicriver. The free version has some limitations, such as the maximum map size (1000 x 1000 pixels) and the number of textures (10). If you want to unlock more features and options, you can upgrade to the pro version.</p>
-
-<h2>How to Use 3D Map Generator?</h2>
-
-<p>Using 3D Map Generator is very easy and intuitive. Here are the basic steps to create your own 3D map:</p>
-<p></p>
-
-<ol>
-<li><strong>Download and install 3D Map Generator</strong></li>
-<p>First, you need to download and install 3D Map Generator on your computer. You can get it from Graphicriver. After downloading the ZIP file, extract it and run the installer. Follow the instructions to install the plugin on your Photoshop.</p>
-
-<li><strong>Open Photoshop and create a new document</strong></li>
-<p>Next, open Photoshop and create a new document with the size of your desired map. For example, if you want to create a map with a resolution of 1000 x 1000 pixels, create a document with that size. Make sure the color mode is RGB and the background is white.</p>
-
-<li><strong>Load a heightmap</strong></li>
-<p>Now you need to load a heightmap into your document. You can use any heightmap that you have or find online. There are many websites that offer free heightmaps of different places on earth, such as Maps 3D or 3D-Mapper. You can also create your own heightmap with World Machine, a software that lets you generate realistic terrains.</p>
-
-<p>To load a heightmap into your document, go to File > Place Embedded and select the heightmap image file. Resize and position it to fit your document. Then press Enter to place it.</p>
-
-<li><strong>Run 3D Map Generator</strong></li>
-<p>Now it's time to run 3D Map Generator and turn your heightmap into a 3D map. Go to Window > Extensions > 3D Map Generator - Terrain. A new panel will appear on your screen with various options and tools.</p>
-
-<p>The first thing you need to do is click on the Generate button at the top of the panel. This will create a 3D map based on your heightmap. You can see the result in a new window that pops up.</p>
-
-<li><strong>Customize your map</strong></li>
-<p>Now you can customize your map with various tools and settings in the panel. You can change the water level, snow cover, vegetation density, road width, building height, and more. You can also add labels, icons, logos, or text to your map.</p>
-
-<p>To use these tools and</p> ddb901b051<br />
-<br />
-<br />
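The deleted article above defines a heightmap as a grayscale image in which darker pixels mean lower terrain and lighter pixels mean higher terrain. A minimal sketch of that encoding (assuming NumPy and an 8-bit grayscale image; the elevation scale is an arbitrary illustrative choice, not something the plugin prescribes):

import numpy as np

# Toy 2x2 heightmap: 0 = black = lowest point, 255 = white = highest point.
heightmap = np.array([[0, 64], [128, 255]], dtype=np.uint8)

# Map pixel intensity linearly onto an assumed 0-1000 m elevation range.
max_elevation_m = 1000.0
elevation_m = heightmap.astype(np.float32) / 255.0 * max_elevation_m
print(elevation_m)  # approximately [[0., 251.], [502., 1000.]]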
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fastgsm S3g 1.0.0.42 Free Download ((NEW)).md
DELETED
@@ -1,187 +0,0 @@
-<br />
-<br> - Benefits: Highlight the main advantages of using Fastgsm s3g 1.0.0.42 to unlock Samsung phones, such as saving money, time, and hassle. | | H2: How to download and install Fastgsm s3g 1.0.0.42 for free? | - Requirements: List the minimum system requirements and compatible Samsung phone models for Fastgsm s3g 1.0.0.42. <br> - Download: Provide the link to download Fastgsm s3g 1.0.0.42 for free from a reliable source, and explain how to verify the file integrity and security. <br> - Install: Guide the reader through the installation process step by step, with screenshots and tips. | | H2: How to use Fastgsm s3g 1.0.0.42 to unlock your Samsung phone? | - Connect: Explain how to connect your Samsung phone to your computer using a USB cable, and how to enable USB debugging mode on your phone. <br> - Detect: Show how to launch Fastgsm s3g 1.0.0.42 and let it detect your phone model and network lock status automatically. <br> - Unlock: Demonstrate how to select the unlock option and enter the unlock code provided by Fastgsm s3g 1.0.0.42, and how to confirm the unlock success message on your phone screen. | | H2: How to troubleshoot common issues with Fastgsm s3g 1.0.0.42? | - Errors: List some common error messages or problems that may occur when using Fastgsm s3g 1.0.0.42, such as invalid unlock code, connection failure, or device not supported, and provide possible solutions or alternatives for each one.<br> - Support: Provide the contact information and website of Fastgsm s3g 1.0.0.42 customer service, and recommend other reliable sources of help or feedback, such as forums or blogs. | H2: How to update or uninstall Fastgsm s3g 1.0.0.42? | - Update: Explain how to check for updates and download the latest version of Fastgsm s3g 1.0.0.42, and why it is important to keep the software updated for optimal performance and compatibility.<br> - Uninstall: Describe how to uninstall Fastgsm s3g 1.0.0.42 from your computer completely and safely, and what to do if you encounter any problems during the uninstallation process. | H2: Conclusion | - Summary: Summarize the main points and benefits of using Fastgsm s3g 1.0.0.42 to unlock Samsung phones.<br> - Call to action: Encourage the reader to try Fastgsm s3g 1.0.0.42 for free and share their experience or feedback with others.<br> - Disclaimer: Remind the reader that unlocking their phone may void their warranty or violate their carrier's terms of service, and that they are responsible for their own actions. Table 2: Article with HTML formatting <h1>What is Fastgsm s3g 1.0.0.42 and why do you need it?</h1>
-<p>If you own a Samsung phone that is locked to a specific network carrier, you may have encountered some limitations or inconveniences when using your device.</p>
-<p>For example, you may not be able to use your phone with a different SIM card from another carrier, which can be frustrating if you travel abroad frequently or want to switch to a cheaper or better plan.</p>
-<h2>Fastgsm s3g 1.0.0.42 free download</h2><br /><p><b><b>DOWNLOAD</b> →→→ <a href="https://byltly.com/2uKAgj">https://byltly.com/2uKAgj</a></b></p><br /><br />
-<p>Or you may have to pay a hefty fee or wait for a long time to get your phone unlocked by your carrier, which can be annoying if you want to sell your phone or give it away.</p>
-<p>Fortunately, there is a solution that can help you unlock your Samsung phone quickly, easily, and affordably.</p>
-<p>That solution is called <strong>Fastgsm s3g 1.0</strong>.<strong>42</strong>, a software <p>Fastgsm s3g 1.0.0.42 is a software program that allows you to unlock your Samsung phone from any network carrier in the world, using a simple USB cable and a computer.</p>
-<p>It works by generating an unlock code for your phone model and network, which you can enter on your phone screen to remove the network lock permanently.</p>
-<p>Fastgsm s3g 1.0.0.42 is compatible with most Samsung phone models, including the popular Galaxy S, Note, and A series, as well as older models like the E, J, and C series.</p>
-<p>By using Fastgsm s3g 1.0.0.42 to unlock your Samsung phone, you can enjoy the following benefits:</p>
-<ul>
-<li><strong>Save money:</strong> You can avoid paying expensive roaming fees or unlocking fees to your carrier, and you can choose the best SIM card deal for your needs.</li>
-<li><strong>Save time:</strong> You can unlock your phone in minutes, without waiting for days or weeks for your carrier to process your request or send you the unlock code.</li>
-<li><strong>Save hassle:</strong> You can unlock your phone from the comfort of your home or office, without visiting a store or sending your phone to a service center.</li>
-<li><strong>Increase value:</strong> You can increase the resale value of your phone, as unlocked phones are more attractive and flexible for buyers.</li>
-<li><strong>Increase freedom:</strong> You can use your phone with any SIM card from any carrier in any country, and switch between them as you please.</li>
-</ul>
-<p>As you can see, Fastgsm s3g 1.0.0.42 is a powerful and convenient tool that can help you unlock your Samsung phone with ease and confidence.</p>
-<p>But how do you get it and use it? That's what we will show you in the next sections of this article.</p>
-<p></p>
-<h2>How to download and install Fastgsm s3g 1.0.0.42 for free?</h2>
-<p>If you want to use Fastgsm s3g 1.0.0.42 to unlock your Samsung phone, you need to download and install it on your computer first.</p>
-<p>Here are the steps you need to follow:</p>
-<h3>Requirements</h3>
-<p>Before you download and install Fastgsm s3g 1.0.0.42, make sure you have the following requirements:</p>
-<ul>
-<li>A Windows PC with at least 512 MB of RAM and 50 MB of free disk space.</li>
-<li>A USB cable that is compatible with your Samsung phone.</li>
-<li>A Samsung phone that is locked to a network carrier and supported by Fastgsm s3g 1.0.0.42. You can check the list of supported models <a href="">here</a>.</li>
-<li>An internet connection to download the software and the unlock code.</li>
-</ul>
-<h3>Download</h3>
-<p>To download Fastgsm s3g 1.0.0.42 for free, you need to visit the official website of Fastgsm s3g 1.0.0.42 <a href="">here</a>.</p>
-<p>You will see a download button on the homepage that will direct you to a page where you can choose your Samsung phone model from a drop-down menu.</p>
-<p>Select your phone model and click on the download button again to start downloading the software file.</p>
-<p>The file name will be something like <em>fastgsms3g-1-0-0-42.exe</em>, and the file size will be around 10 MB.</p>
-<p>Once the download is complete, you need to verify the file integrity and security before installing it.</p>
-<p>You can do this by checking the file properties and comparing the file hash with the one provided on the website.</p>
-<p>The file hash is a unique code that identifies the file and ensures that it has not been tampered with or corrupted during the download process.</p>
-<p>To check the file hash, you can use a free online tool like <a href="">this one</a>.</p>
-<p>Simply upload the file or enter its URL, and select the SHA-256 algorithm from the options.</p>
-<p>The tool will generate a hash code for the file and display it on the screen.</p>
-<p>You need to compare this hash code with the one provided on the website, which should be something like <em>d9f5c7f8f9b4c8e6f7d6e9c8f7e6d9f5c7f8f9b4c8e6f7d6e9c8f7e6d9f5c7f8</em>.</p>
-<p>If the hash codes match, it means that the file is authentic and safe to install.</p>
-<p>If the hash codes do not match, it means that the file is corrupted or malicious, and you should delete it and download it again from a different source.</p>
-<h3>Install</h3>
-<p>To install Fastgsm s3g 1.0.0.42 on your computer, you need to follow these steps:</p>
-<ol>
-<li>Double-click on the downloaded file to launch the installation wizard.</li>
-<li>Click on the <em>Next</em> button to proceed with the installation.</li>
-<li>Read and accept the license agreement, and click on the <em>Next</em> button again.</li>
-<li>Choose the destination folder where you want to install the software, and click on the <em>Next</em> button.</li>
-<li>Click on the <em>Install</em> button to start the installation process.</li>
-<li>Wait for a few minutes until the installation is complete, and click on the <em>Finish</em> button to exit the wizard.</li>
-</ol>
-<p>Congratulations! You have successfully installed Fastgsm s3g 1.0.0.42 on your computer.</p>
-<p>You can now use it to unlock your Samsung phone in a matter of minutes.</p>
-<h2>How to use Fastgsm s3g 1.0.0.42 to unlock your Samsung phone?</h2>
-<p>Now that you have downloaded and installed Fastgsm s3g 1.0.0.42 on your computer, you are ready to use it to unlock your Samsung phone.</p>
-<p>Here are the steps you need to follow:</p>
-<h3>Connect</h3>
-<p>The first step is to connect your Samsung phone to your computer using a USB cable.</p>
-<p>Make sure that your phone is turned on and has enough battery power.</p>
-<p>You also need to enable USB debugging mode on your phone, which allows your computer to communicate with your phone and access its data.</p>
-<p>To enable USB debugging mode, you need to do the following:</p>
-<ul>
-<li>Go to <em>Settings</em> on your phone and tap on <em>About phone</em>.</li>
-<li>Find the <em>Build number</em> option and tap on it seven times until you see a message that says <em>You are now a developer</em>.</li>
-<li>Go back to <em>Settings</em> and tap on <em>Developer options</em>.</li>
-<li>Find the <em>USB debugging</em> option and toggle it on.</li>
-<li>A pop-up window will appear asking you to allow USB debugging. Tap on <em>OK</em>.</li>
-</ul>
-<p>You have now enabled USB debugging mode on your phone.</p>
-<h3>Detect</h3>
-<p>The next step is to launch Fastgsm s3g 1.0.0.42 on your computer and let it detect your phone model and network lock status automatically.</p>
-<p>To do this, you need to do the following:</p>
-<ul>
-<li>Double-click on the Fastgsm s3g 1.0.0.42 icon on your desktop or in your start menu to open the software.</li>
-<li>You will see a welcome screen with some instructions and information about the software. Click on the <em>Next</em> button to continue.</li>
-<li>The software will scan your computer for connected devices and display them in a list. Select your Samsung phone from the list and click on the <em>Detect device</em> button.</li>
-<li>The software will analyze your phone and display its model name, IMEI number, firmware version, and network lock status in a table. You can also see a picture of your phone on the right side of the screen.</li>
-<li>If your phone is locked, you will see a red lock icon next to its network name. If your phone is unlocked, you will see a green check mark instead.</li>
-<li>If your phone is not detected or supported by the software, you will see an error message or a question mark icon instead. In that case, you may need to try a different USB cable or port, update your phone drivers, or contact Fastgsm s3g 1.0.0.42 customer service for assistance.</li>
-</ul>
-<p>You have now detected your phone model and network lock status using Fastgsm s3g 1.0.0.42.</p>
-<h3>Unlock</h3>
-<p>The final step is to <p>The final step is to select the unlock option and enter the unlock code provided by Fastgsm s3g 1.0.0.42, and confirm the unlock success message on your phone screen.</p>
-<p>To do this, you need to do the following:</p>
-<ul>
-<li>On the Fastgsm s3g 1.0.0.42 software screen, click on the <em>Unlock</em> button at the bottom.</li>
-<li>The software will connect to the Fastgsm s3g 1.0.0.42 server and request an unlock code for your phone model and network.</li>
-<li>You will see a progress bar and a message that says <em>Waiting for unlock code</em>. This may take a few seconds or minutes, depending on your internet speed and the availability of the server.</li>
-<li>Once the unlock code is received, you will see it displayed on the screen, along with some instructions on how to enter it on your phone.</li>
-<li>On your phone, you will see a prompt that asks you to enter the network unlock code or PIN. Enter the unlock code that you see on the screen, and press <em>OK</em> or <em>Unlock</em>.</li>
-<li>If the unlock code is correct, you will see a message that says <em>Network unlock successful</em> or <em>Network unlock complete</em> on your phone screen.</li>
-<li>If the unlock code is incorrect, you will see a message that says <em>Network unlock unsuccessful</em> or <em>Network unlock failed</em> on your phone screen. In that case, you may have entered the wrong code, or your phone may have a different lock type or level. You can try again with a different code, or contact Fastgsm s3g 1.0.0.42 customer service for assistance.</li>
-</ul>
-<p>You have now unlocked your Samsung phone using Fastgsm s3g 1.0.0.42.</p>
-<p>You can now remove the USB cable from your phone and computer, and restart your phone.</p>
-<p>You can also insert a different SIM card from another carrier and check if your phone works normally with it.</p>
-<p>You should see a signal strength indicator and a network name on your phone screen, indicating that your phone is unlocked and ready to use with any SIM card.</p>
-<h2>How to troubleshoot common issues with Fastgsm s3g 1.0.0.42?</h2>
-<p>Although Fastgsm s3g 1.0.0.42 is designed to be easy and reliable to use, you may encounter some issues or problems when using it to unlock your Samsung phone.</p>
-<p>Here are some common error messages or problems that may occur, and how to solve them:</p>
-<h3>Invalid unlock code</h3>
-<p>If you enter the unlock code provided by Fastgsm s3g 1.0.0.42 on your phone, but it says that it is invalid or incorrect, there are several possible reasons:</p>
-<ul>
-<li>You may have entered the wrong code or made a typo. Make sure that you enter the exact code that you see on the screen, without any spaces or extra characters.</li>
-<li>You may have used up all your attempts to enter the unlock code. Some phones have a limit on how many times you can try to enter the unlock code before they become permanently locked or blocked. If this happens, you may need to reset your phone or use a different method to unlock it.</li>
-<li>Your phone may have a different lock type or level than what Fastgsm s3g 1.0.0.42 supports. Some phones have more than one lock type or level, such as network lock, subset lock, provider lock, or user lock. Fastgsm s3g 1.0.0.42 only supports network lock codes, which are the most common and basic ones. If your phone has a different lock type or level, you may need to use a different software or service to unlock it.</li>
-<li>Your phone may have been blacklisted or reported as lost or stolen by your carrier or the original owner. If this happens, your phone may not work with any SIM card, even if it is unlocked. You can check the blacklist status of your phone using a free online tool like <a href="">this one</a>.</li>
-</ul>
-<p>To solve this problem, you can try the following solutions:</p>
-<ul>
-<li>Double-check the unlock code and enter it again carefully.</li>
-<li>Restart your phone and try again.</li>
-<li>Contact Fastgsm s3g 1.0.0.42 customer service and provide them with your phone model, IMEI number, network name, and error message. They may be able to provide you with a different unlock code or a refund.</li>
-<li>Use a different software or service to unlock your phone, preferably one that supports your phone model and lock type or level.</li>
-<li>Check the blacklist status of your phone and contact your carrier or the original owner to resolve the issue.</li>
-</ul>
-<h3>Connection failure</h3>
-<p>If you connect your Samsung phone to your computer using a USB cable, but Fastgsm s3g 1.0.0.42 does not detect it or fails to communicate with it, there are several possible reasons:</p>
-<ul>
-<li>You may have used a faulty or incompatible USB cable or port. Make sure that you use a working and compatible USB cable and port for your phone and computer.</li>
-<li>You may have not enabled USB debugging mode on your phone. Make sure that you enable USB debugging mode on your phone before connecting it to your computer.</li>
-<li>You may have not installed the proper drivers for your phone on your computer. Make sure that you install the latest drivers for your phone model from the official Samsung website or from Fastgsm s3g 1.0.0.42 website.</li>
-<li>You may have some interference or conflict from other software or devices on your computer. Make sure that you close any other software or programs that may use the USB port or communicate with your phone, such as antivirus, firewall, VPN, or other unlocking software.</li>
-</ul>
-<p>To solve this problem, you can try the following solutions:</p>
-<ul>
-<li>Try a different USB cable or port.</li>
-<li>Enable USB debugging mode on your phone.</li>
-<li>Install the proper drivers for your phone on your computer.</li>
-<li>Close any other software or programs that may interfere with the connection.</li>
-<li>Restart your phone and computer and try again.</li>
-<li>Contact Fastgsm s3g 1.0.0.42 customer service and provide them with your phone model, IMEI number, network name, and error message. They may be able to help you troubleshoot the issue.</li>
-</ul>
-<h3>Device not supported</h3>
-<p>If you launch Fastgsm s3g 1.0.0.42 on your computer and select your Samsung phone from the list of devices, but it says that it is not supported by the software, there are several possible reasons:</p>
-<ul>
-<li>Your phone model may be too new or too old for Fastgsm s3g 1.0.0.42 to support it. Fastgsm s3g 1.0.0.42 supports most Samsung phone models, but not all of them. You can check the list of supported models <a href="">here</a>.</li>
-<li>Your phone firmware version may be too new or too old for Fastgsm s3g 1.0.0.42 to support it. Fastgsm s3g 1.0.0.42 supports most firmware versions, but not all of them. You can check the firmware version of your phone by going to <em>Settings</em>, <em>About phone</em>, and <em>Software information</em>.</li>
-<li>Your phone network may be too new or too old for Fastgsm s3g 1.0 .0.0.42 from your computer completely and safely, you need to do the following:</p>
-<ul>
-<li>Go to the <em>Control Panel</em> on your computer and click on the <em>Programs and Features</em> option.</li>
-<li>Find Fastgsm s3g 1.0.0.42 from the list of installed programs and click on it.</li>
-<li>Click on the <em>Uninstall</em> button at the top or right-click on it and select <em>Uninstall</em> from the menu.</li>
-<li>A confirmation window will appear asking you if you want to uninstall Fastgsm s3g 1.0.0.42. Click on the <em>Yes</em> button to proceed.</li>
-<li>The uninstallation wizard will start and guide you through the uninstallation process step by step.</li>
-<li>Click on the <em>Next</em> button to continue with the uninstallation.</li>
-<li>Select the option to remove all settings and data associated with Fastgsm s3g 1.0.0.42, and click on the <em>Next</em> button again.</li>
-<li>Click on the <em>Uninstall</em> button to start the uninstallation process.</li>
-<li>Wait for a few minutes until the uninstallation is complete, and click on the <em>Finish</em> button to exit the wizard.</li>
-</ul>
-<p>You have now uninstalled Fastgsm s3g 1.0.0.42 from your computer completely and safely.</p>
-<p>You may need to restart your computer to complete the uninstallation process.</p>
-<p>If you encounter any problems during the uninstallation process, such as error messages or leftover files or folders, you can use a free online tool like <a href="">this one</a> to scan and clean your computer from any traces of Fastgsm s3g 1.0.0.42.</p>
-<h2>Conclusion</h2>
-<p>In this article, we have shown you what Fastgsm s3g 1.0.0.42 is, why you need it, how to download and install it, how to use it to unlock your Samsung phone, how to troubleshoot common issues with it, and how to update or uninstall it.</p>
-<p>We hope that this article has been helpful and informative for you, and that you have learned something new and useful about Fastgsm s3g 1.0.0.42.</p>
-<p>If you want to try Fastgsm s3g 1.0.0.42 for yourself, you can download it for free from <a href="">here</a>, and unlock your Samsung phone in minutes.</p>
-<p>If you have any questions, feedback, or suggestions about Fastgsm s3g 1.0.0.42, you can contact their customer service at <a href="">here</a>, or visit their website at <a href="">here</a>.</p>
-<p>You can also share your experience or opinion about Fastgsm s3g 1.0.0.42 with other users or readers by leaving a comment below this article, or by posting on social media platforms like Facebook, Twitter, or Instagram.</p>
-<p>We would love to hear from you and learn from your insights and perspectives.</p>
-<p>Please note that unlocking your phone may void your warranty or violate your carrier's terms of service, and that you are responsible for your own actions.</p>
-<p>We are not affiliated with or endorsed by Fastgsm s3g 1.0.0.42, and we do not guarantee the accuracy or reliability of the information or software provided in this article.</p>
-<p>This article is for educational and informational purposes only, and you should use Fastgsm s3g 1.0.0.42 at your own risk and discretion.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions and answers about Fastgsm s3g 1.0.0.42:</p>
-<h3>Q: Is Fastgsm s3g 1.0.0.42 free?</h3>
-<p>A: Yes, Fastgsm s3g 1.0.0.42 is free to download and use. However, you may need to pay a small fee to get the unlock code for your phone model and network, depending on the availability and demand of the code.</p>
-<h3>Q: Is Fastgsm s3g 1.0.0.42 safe?</h3>
-<p>A: Yes, Fastgsm s3g 1.0.0.42 is safe to use, as long as you download it from a reliable source and verify its file integrity and security before installing it. You should also scan your computer and phone for any viruses or malware before and after using the software.</p>
-<h3>Q: Is Fastgsm s3g 1.0.0.42 legal?</h3>
-<p>A: Yes, Fastgsm s3g 1.0.0.42 is legal to use, as long as you own the phone that you want to unlock and you do not intend to use it for any illegal or fraudulent purposes. However, unlocking your phone may void your warranty or violate your carrier's terms of service, so you should check with them before using the software.</p>
-<h3>Q: How long does it take to unlock a Samsung phone with Fastgsm s3g 1.0.0.42?</h3>
-<p>A: It usually takes only a few minutes to unlock a Samsung phone with Fastgsm s3g 1.0.0.42, depending on the speed of your internet connection and the availability of the unlock code. However, some phone models or networks may take longer than others, so you should be patient and wait for the software to complete the process.</p>
-<h3>Q: What if Fastgsm s3g 1.0.0.42 does not work for me?</h3>
-<p>A: If Fastgsm s3g 1.0.0.42 does not work for you, you can try the following options:</p>
-<ul>
-<li>Contact Fastgsm s3g 1.0.0.42 customer service and provide them with your phone model, IMEI number, network name, and error message. They may be able to help you fix the issue or offer you a refund.</li>
-<li>Use a different software or service to unlock your phone, preferably one that supports your phone model, firmware version, and network.</li>
-<li>Visit a local phone repair shop or service center and ask them to unlock your phone for you.</li>
-</ul></p> b2dd77e56b<br />
-<br />
-<br />
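The deleted Fastgsm article above tells readers to verify the installer by uploading it to an online SHA-256 tool and comparing hashes. For reference, the same check can be done locally with nothing but the Python standard library (a sketch; the filename is the one the article quotes):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash a file in chunks so large downloads don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

# Compare the printed digest against the hash published on the download page.
print(sha256_of('fastgsms3g-1-0-0-42.exe'))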
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FinalMesh Professional 2.4.2.331 Crack UPD Downloadl.md
DELETED
@@ -1,21 +0,0 @@
-
-<h1>How to Download FinalMesh Professional 2.4.2.331 Crack for Free</h1>
-<p>FinalMesh Professional is a powerful and versatile 3D viewer and editor that allows you to create, edit, and convert 3D models and scenes. With FinalMesh Professional, you can easily import and export various 3D formats, such as OBJ, STL, PLY, 3DS, FBX, GLTF, and more. You can also apply materials, textures, lights, and shadows to your 3D models and render them with high-quality results.</p>
-<p>However, FinalMesh Professional is not a free software. You need to purchase a license to use it without any limitations or watermarks. If you are looking for a way to download FinalMesh Professional 2.4.2.331 crack for free, you may be tempted by some websites that claim to offer it. But beware, these websites may be unsafe and may contain viruses, malware, or spyware that can harm your computer or steal your personal information.</p>
-<h2>FinalMesh Professional 2.4.2.331 Crack Downloadl</h2><br /><p><b><b>Download File</b> ––– <a href="https://byltly.com/2uKw7S">https://byltly.com/2uKw7S</a></b></p><br /><br />
-<p>Therefore, we do not recommend downloading FinalMesh Professional 2.4.2.331 crack from any unauthorized sources. Instead, we suggest you to try the official trial version of FinalMesh Professional from its website[^2^]. The trial version allows you to use all the features of FinalMesh Professional for 30 days without any restrictions. You can also contact the support team if you have any questions or issues with the software.</p>
-<p>If you like FinalMesh Professional and want to continue using it after the trial period expires, you can buy a license from its website[^2^] or from authorized resellers. The license price depends on the number of users and the duration of the subscription. You can choose between a monthly, yearly, or perpetual license. By purchasing a license, you will also get free updates, technical support, and access to online tutorials and documentation.</p>
-<p>FinalMesh Professional is a great tool for anyone who works with 3D models and scenes. It offers a lot of features and functions that can help you create stunning 3D visuals and presentations. However, downloading FinalMesh Professional 2.4.2.331 crack from untrusted sources is not a good idea. It may expose you to security risks and legal issues. Therefore, we advise you to use the official trial version of FinalMesh Professional or buy a license from its website or authorized resellers.</p>
-
-<p>FinalMesh Professional has many benefits that make it stand out from other 3D viewers and editors. Some of these benefits are:</p>
-<p></p>
-<ul>
-<li>It supports a wide range of 3D formats, including popular ones like OBJ, STL, PLY, 3DS, FBX, GLTF, and more. You can easily import and export your 3D models and scenes without losing any quality or data.</li>
-<li>It has a fast and modern user interface that is easy to use and customize. You can access all the features and functions from the ribbon menu, toolbar, or context menu. You can also change the theme, layout, and language of the interface according to your preferences.</li>
-<li>It has a powerful geometry engine that allows you to create, edit, and transform your 3D models and scenes with various tools and modifiers. You can apply boolean operations, subdivision surfaces, extrusions, mirroring, arrays, and more to your 3D objects. You can also use procedural primitives like splines, cubes, spheres, texts, and more to create complex shapes.</li>
-<li>It has a built-in raytracer that can render your 3D models and scenes with realistic materials, textures, lights, and shadows. You can adjust the render settings and quality to suit your needs. You can also export your 3D models and scenes as images or vector illustrations in various formats.</li>
-<li>It has a unique feature of publishing your 3D models and scenes as PDF documents or WebGL applications. You can convert your 3D data into regular PDF files that can be viewed with any PDF reader. You can also create HTML applications with 3D WebGL content that can be viewed with any web browser. You can customize the appearance and behavior of your PDF documents or WebGL applications with various options.</li>
-</ul>
-<p>FinalMesh Professional is a comprehensive solution for anyone who needs to work with 3D models and scenes. Whether you are a designer, engineer, artist, or hobbyist, you will find FinalMesh Professional useful and convenient for your 3D projects.</p> cec2833e83<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gta 3 Weather Cheat Pc EXCLUSIVE.md
DELETED
@@ -1,26 +0,0 @@
-<br />
-<h1>GTA 3 Weather Cheat PC: How to Change the Weather in Grand Theft Auto III</h1>
-<p>Grand Theft Auto III (GTA 3) is a classic open-world action-adventure game that was released in 2001 for PC, PlayStation 2, and Xbox. It is set in a fictional city called Liberty City, which is loosely based on New York City.</p>
-<p>One of the features that makes GTA 3 fun and immersive is the dynamic weather system, which changes according to the time of day and the location. You can experience sunny, cloudy, rainy, foggy, or snowy weather conditions as you explore the city and complete missions.</p>
-<h2>gta 3 weather cheat pc</h2><br /><p><b><b>DOWNLOAD</b> ✏ <a href="https://byltly.com/2uKzqx">https://byltly.com/2uKzqx</a></b></p><br /><br />
-<p>However, sometimes you may want to change the weather to suit your mood or preference. For example, you may want to enjoy a sunny day at the beach, or create a stormy atmosphere for a dramatic chase scene. Or maybe you just want to see how the game looks in different weather settings.</p>
-<p>Fortunately, GTA 3 has a cheat code that allows you to change the weather at will. In this article, we will show you how to use the GTA 3 weather cheat PC and what are the effects of each weather option.</p>
-<h2>How to Use the GTA 3 Weather Cheat PC</h2>
-<p>To use the GTA 3 weather cheat PC, you need to enter a specific code during gameplay. You can do this by typing the code on your keyboard or by using the on-screen keyboard if you are playing with a controller.</p>
-<p>The code for the GTA 3 weather cheat PC is <code>ILIKESCOTLAND</code>. You need to type this code exactly as it is written, without any spaces or punctuation marks. You will hear a sound effect if you enter the code correctly.</p>
-<p>Each time you enter the code, the weather will change to a different option. There are four weather options in total: sunny, cloudy, rainy, and foggy. You can cycle through these options by entering the code repeatedly until you get the desired weather.</p>
-<p></p>
-<p>Note that the GTA 3 weather cheat PC does not affect the time of day or the season. It only changes the current weather condition. Also, note that using any cheat code in GTA 3 will disable your ability to save your game or earn achievements. Therefore, use cheats at your own risk and only for fun.</p>
-<h2>The Effects of Each Weather Option</h2>
-<p>Each weather option in GTA 3 has its own visual and gameplay effects. Here are some of the effects of each option:</p>
-<ul>
-<li><b>Sunny:</b> This is the default and most common weather option in GTA 3. It makes the sky clear and bright, and gives the city a vibrant and lively look. It also improves your visibility and driving conditions.</li>
-<li><b>Cloudy:</b> This option makes the sky overcast and dull, and gives the city a gloomy and depressing look. It also reduces your visibility and makes driving more challenging.</li>
-<li><b>Rainy:</b> This option makes it rain heavily in Liberty City, creating puddles and splashes on the ground and on vehicles. It also makes the sky dark and stormy, and gives the city a wet and miserable look. It also greatly reduces your visibility and makes driving very difficult.</li>
-<li><b>Foggy:</b> This option makes it foggy in Liberty City, creating a thick layer of mist that covers everything in sight. It also makes the sky gray and hazy, and gives the city a mysterious and eerie look. It also severely reduces your visibility and makes driving almost impossible.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>GTA 3 is a fun and exciting game that lets you experience different weather conditions in Liberty City. However, if you want to change the weather to suit your preference or mood, you can use the GTA 3 weather cheat PC to do so. Just remember to type <code>ILIKESCOTLAND</code> during gameplay and cycle through the four weather options: sunny, cloudy, rainy, and foggy.</p>
-<p>We hope this article was helpful and informative for you. Have fun playing GTA 3 with different weather settings!</p> ddb901b051<br />
-<br />
-<br />
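As the deleted GTA 3 article above explains, each entry of the ILIKESCOTLAND cheat advances the weather to the next of four fixed states and wraps around. A toy model of that cycling behavior (illustrative only, not game code):

from itertools import cycle

# The four weather states the cheat cycles through, in the order the article lists them.
weather = cycle(['sunny', 'cloudy', 'rainy', 'foggy'])

for _ in range(5):  # entering the cheat a fifth time wraps back to the start
    print(next(weather))  # sunny, cloudy, rainy, foggy, sunny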
spaces/1gistliPinn/ChatGPT4/Examples/Download Aplikasi untuk Buat Undangan Pernikahan yang Bisa Dibagikan ke Media Sosial di Wedding Invitation Card Maker.md
DELETED
@@ -1,29 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p><strong>Aplikasi Undangan Digital</strong> | Di zaman serba modern ini, tidak perlu menghabiskan banyak kertas untuk mencetak sebuah undangan. Telah hadir sebuah aplikasi yang sangat kamu butuhkan. Pada saat kamu bingung menyiapkan momen pesta pernikahan atau lainnya. Yaitu, Apk pembuat kartu undangan digital untuk segala keperluan.Aplikasi ini sangat menghemat waktu dan biaya. Kamu tak perlu bersusah payah memikirkan kartu undangan seperti apa yang cocok buat tema pestamu. Pastinya, dengan kartu undangan yang keren ini akan membuat pestamu lebih menarik. Download aplikasi di bawah ini.</p>
|
3 |
-
<p><strong>Brilio.net - </strong>Bagi setiap pasangan, acara pernikahan merupakan momen yang sakral. Jika kamu sudah punya rencana untuk melangsungkan pernikahan, tentu dibutuhkan perencanaan yang matang, dan biasanya butuh biaya yang tidak sedikit. Mulai dari gedung, dekorasi, fotografer, makeup, hingga undangan pernikahan.</p>
|
4 |
-
<h2>download aplikasi untuk buat undangan pernikahan</h2><br /><p><b><b>Download</b> ↔ <a href="https://imgfil.com/2uy1qV">https://imgfil.com/2uy1qV</a></b></p><br /><br />
|
5 |
-
<p>Jika kamu memiliki smartphone atau handphone pintar, tentu ini bukanlah suatu masalah besar dan menjadikan beban. Kamu dapat menekan biaya pernikahan dengan membuat desain undangan pernikahan sendiri sesuai keinginanmu. Tidak perlu memikirkan biaya yang harus dikeluarkan, karena dengan smartphone kemudahan bisa kamu dapatkan.</p>
|
6 |
-
<p>Cukup berbekal aplikasi yang bisa kamu download di Google PlayStore, kamu bisa merancang sendiri undangan pernikahan. Kamu juga akan dimudahkan dengan membuat undangan sendiri karena dapat dikirim kepada saudara dan teman yang jauh hanya dalam hitungan detik.</p>
|
7 |
-
<p>Buat kamu yang berencana untuk membuat undangan sendiri, kamu perlu mencoba untuk mendownload aplikasi ini agar undangan kamu lebih keren. Nah, berikut aplikasi undangan yang bisa kamu gunakan, <strong>brilio.net</strong> rangkum dari berbagai sumber, Senin (3/2).</p>
|
8 |
-
<p>Aplikasi desain yang telah digunakan banyak orang ini memiliki beragam template desain mulai dari undangan pernikahan, acara ulang tahun, wisuda, dan lain sebagainya. Kamu bisa menggunakan template ini dengan gratis dengan membuka aplikasi dan ketik di kolom pencarian "invitation wedding".</p>
|
9 |
-
<p>Aplikasi desain ini telah diunduh lebih dari 500.000 kali di Google PlayStore. Kamu bisa dengan mudah mengubah nama, tanggal, tempat, dan informasi seputar pernikahanmu. Bahkan, kamu juga dapat memberi bingkai lho. Tetapi jangan terlalu berlebihan agar desain undangan kamu tidak norak ya.</p>
|
10 |
-
<p>Aplikasi ini dibuat Photoshop Mobile Apps yang telah diunduh lebih dari 500.000 kali di Google Play Store. Aplikasi ini bisa membuat undangan hanya dengan beberapa menit saja. Kamu juga bisa menggungah desain undangan yang telah kamu bikin di Instagram dengan rasio yang telah kamu tentukan sebelumnya.</p>
|
11 |
-
<p></p>
|
12 |
-
<p>Saat ini telah banyak undangan yang dibuat dengan format video. Kamu tidak perlu risau, karena dengan aplikasi video invitation maker makin mudah membuat undangan dalam bentuk video. Aplikasi video ini juga dibuat oleh Photoshop Mobile Apps.</p>
|
13 |
-
<p>Kamu bisa bebas memilih template desain sesuai keinginanmu dengan mengubah tanggal, nama, hari pernikahan. Kalau ingin undangan yang kamu hasilkan lebih keren, kamu bisa mengubah ratio dan background dengan berbagai macam variasi.</p>
|
14 |
-
<p>Aplikasi ini memiliki fitur yang dapat mengubah hasil editing menjadi sebuah walpaper di smartphone. Kamu juga bisa menambahkan beberapa quotes untuk membuat tampilan undangan kamu agar lebih keren dan menarik tentunya.</p>
|
15 |
-
<p>Aplikasi undangan pernikahan ini bisa kamu gunakan untuk mendesain sesuai keinginan dengan bingkai, latar belakang, dan stiker. Kamu juga bisa mengedit foto bersama pasangan untuk dijadikan background undangan agar makin keren.</p>
|
16 |
-
<p>Sebenarnya ada cukup banyak aplikasi membuat undangan di PC yang bisa diperoleh. Ada yang berbayar dan tak sedikit yang bisa diunduh secara bebas dan gratis. Namun sayangnya tak sedikit software itu yang terbilang tidak user friendly. Kalaupun bagus biasanya berbayar.</p>
|
17 |
-
<p>Kebutuhan terhadap pembuatan kartu undangan khususnya untuk acara pernikahan sangatlah tinggi. Setiap akhir pekan selalu ada acara pernikahan di berbagai daerah. Hal ini biasanya terlihat dari selalu penuhnya jadwal persewaan gedung untuk acara resepsi perkawinan.if(typeof ez_ad_units!='undefined')ez_ad_units.push([[728,90],'cademedia_com-medrectangle-3','ezslot_8',136,'0','0']);__ez_fad_position('div-gpt-ad-cademedia_com-medrectangle-3-0');Besarnya permintaan terhadap pembuatan kartu undangan ini kebanyakan menjadi lahan bisnis yang menggiurkan untuk para pelaku usaha kecil dan perorangan. Jasa desain grafis menjadi sasaran konsumen dalam berburu layanan ini. Apalagi mereka pun biasanya melayani jasa cetaknya meski tidak memiliki mesin percetakan sendiri.</p>
|
18 |
-
<p>There are now plenty of excellent PC applications for making invitations, which is especially useful if you want to try designing your own wedding invitation cards. Here are some highly recommended free wedding invitation design programs.<strong>1. Card and Pocket</strong> This application is quite popular because it is easy to use yet produces attractive results. It offers many features, such as a wide color palette and elegant, beautiful templates. When you are done, you can download the design, print it straight away on a home printer, or have it printed at a print shop.</p>
|
19 |
-
<p>To use this laptop invitation maker, visit its website.<strong>2. Greeting Island</strong> This one is also easy to use, with a variety of ready-made templates to pick from. This laptop invitation maker can be used to design wedding invitations, business cards, birthday invitations, and even calendars. The features this online tool provides are fairly extensive, and the results look professional.<strong>3. Download and Print</strong></p>
|
20 |
-
<p>Another digital invitation maker for PC is Download and Print, which likewise offers a range of appealing features, with many attractive templates and sample designs available. Some of its features are free while others are paid. To use the service, you must register and log in on its website.<strong>4. Elli</strong> One more free wedding invitation design tool you can use is Elli. True to its name, its features are simple and easy to use. Its design templates come in a catalog you can browse to your liking, and if needed you can also download the invitation maker to install on your computer.</p>
|
21 |
-
<p><strong>5. Printable Invitation Kits</strong> Another recommended PC invitation maker is Printable Invitation Kits. This online tool cannot be downloaded, as it can only be used on its website. After registering and logging in, you can take advantage of the attractive features it provides, from its appealing template catalog to fonts, colors, and more.</p>
|
22 |
-
<p><strong>6. CorelDraw</strong> Another wedding invitation design tool you can use is CorelDraw. It is hugely popular and ranks among the best-known design programs. Its feature set is very complete and easy to use, even for beginners, and when you are done your design can be downloaded right away or printed on a printer. Those are some wedding invitation design programs you can put to use. With these PC invitation makers you can create more freely and save on design fees.</p>
|
23 |
-
<p>Android and iPhone users rely on a number of helper apps, and invitation makers are among them. Invitation maker apps on Android and iPhone make it easy to create announcements for weddings, parties, thanksgiving gatherings, and much more. Their availability makes it simple for anyone to invite their relatives and friends to an event.</p>
|
24 |
-
<p>Invitation maker & Card design is an invitation maker app available for both Android and iPhone. With it, you can easily create many kinds of invitations, such as party invitation cards, wedding invitations, birthday invitations, and more. It also offers a premium version with a more complete feature set. To get started, download Invitation maker & Card design from the Play Store or the App Store.</p>
|
25 |
-
<p>Undangan Web Digital can also be used to create invitations quickly and easily, though it is available for Android devices only. It makes it simple to build a web-based wedding invitation, which feels modern and also keeps costs down. You can arrange the invitation however you like using features such as image cropping and rotation, adding and removing elements, a selection of songs, and much more.</p>
|
26 |
-
<p>Goinvite is another app you can use to make invitations on Android devices easily and for free. It is well suited to creating wedding invitations, with headline features such as a wide choice of background music, a gallery for pre-wedding photos and videos, and the ability to edit and arrange your event however you like. To get started with Goinvite, download the app from the Play Store.</p>
|
27 |
-
<p>If you want to try yet another app, give Invitation Card Maker & Design a shot. It is essentially Android-only. With Invitation Card Maker & Design, you can create all sorts of invitations, such as anniversary invitations, birthday invitation card templates, Halloween cards, and much more.</p><br />
|
28 |
-
<br />
|
29 |
-
<br />
|
spaces/1gistliPinn/ChatGPT4/Examples/Driver Tv Tunner Gadmei Usb Utv330 .rar [NEW].md
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
<h2>Driver Tv Tunner Gadmei Usb Utv330 .rar</h2><br /><p><b><b>DOWNLOAD</b> ✦ <a href="https://imgfil.com/2uxWSD">https://imgfil.com/2uxWSD</a></b></p><br /><br />
|
2 |
-
|
3 |
-
-gtv tunner card driver rar download driver tv tunner gadmei usb utv330.rar
Q:
|
4 |
-
|
5 |
-
How can I debug a single page with Angular2 and Webpack?
|
6 |
-
|
7 |
-
I have a large Angular2 application using Webpack.
|
8 |
-
|
9 |
-
When we build the app to ES5, we need to make sure every HTML page that has its own components has a .d.ts file so that the type definition files are generated.
|
10 |
-
|
11 |
-
How can we go about unit testing our .d.ts files?
|
12 |
-
|
13 |
-
I'm thinking of creating a new Angular2 application with an empty file structure and unit testing this component. But this creates a separate build system for testing, instead of having a single build system that builds and transpiles the whole thing.
|
14 |
-
|
15 |
-
Is there a way to "lint" Angular2 code and have it break when there are problems? Like how JSHint or JSLint works?
|
16 |
-
|
17 |
-
A:
|
18 |
-
|
19 |
-
You could use the HtmlWebpackPlugin, like this:
|
20 |
-
|
21 |
-
new HtmlWebpackPlugin({
|
22 |
-
|
23 |
-
template: 'index.html',
|
24 |
-
|
25 |
-
filename: 'index.html',
|
26 |
-
|
27 |
-
inject: 'body'
|
28 |
-
|
29 |
-
})
|
30 |
-
|
31 |
-
This will use index.html as a template and inject the webpack-generated script tags into the body of the index.html that is served to the Angular2 app.
|
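For context, the plugin normally sits inside a full webpack config. The following is a minimal sketch rather than the asker's real setup: the entry point, the ts-loader rule, and the output paths are invented placeholders, and it assumes webpack and html-webpack-plugin are installed (written as a webpack.config.ts, which webpack can load when ts-node is available).

// webpack.config.ts -- minimal sketch wiring in HtmlWebpackPlugin
import * as path from 'path';
import HtmlWebpackPlugin from 'html-webpack-plugin';

export default {
  entry: './src/main.ts',                            // placeholder bootstrap file
  output: {
    path: path.resolve(__dirname, 'dist'),
    filename: 'bundle.js',
  },
  resolve: { extensions: ['.ts', '.js'] },
  module: {
    rules: [{ test: /\.ts$/, loader: 'ts-loader' }], // compile TypeScript sources
  },
  plugins: [
    // Uses index.html as the template and injects a script tag
    // for the generated bundle into its body.
    new HtmlWebpackPlugin({
      template: 'index.html',
      filename: 'index.html',
      inject: 'body',
    }),
  ],
};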
32 |
-
|
33 |
-
In your typescript file, you can then use the export keyword to export things like components:
|
34 |
-
|
35 |
-
export class FirstComponent { }
|
36 |
-
|
37 |
-
This will make sure that the class in the .ts file is exported, and if you use the TypeScript module system, it can be imported like this:
|
38 |
-
|
39 |
-
import { FirstComponent } from './FirstComponent';
|
40 |
-
|
41 |
-
If you use another file, it is better to just require the file instead of using the import statement, for better code maintenance.
|
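As a concrete illustration (the selector and template below are invented for the example, not taken from the question), the exported class would usually be a decorated Angular2 component:

// FirstComponent.ts -- sketch of a component exported with the export keyword
import { Component } from '@angular/core';

@Component({
  selector: 'first-component',
  template: 'first works', // placeholder template
})
export class FirstComponent { }

Any other module can then pull it in with the import statement shown above and reference it, for example in an NgModule's declarations.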
42 |
-
|
43 |
-
Read more about HtmlWebpackPlugin here.
|
44 |
-
|
45 |
-
The heart is a pump that is primarily responsible for pumping blood throughout the body. The human heart has four chambers, the left and right atrium and the left and right ventricles, that are sequentially squeezed by the atrial and ventricular muscle tissue of the heart. As the chambers of the heart contract and expand, the inner walls of the chambers move in and out of the heart wall, a process which is known as myocardial contraction. As the chambers contract, the pressure within the chambers rises. In addition, as the pressure within the chambers increases, the blood flow into<br />
|
46 |
-
<br />
|
47 |
-
<br />
|
48 |
-
<p></p>
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Highway Racing APK How to Get Unlimited Money and Dominate the Road.md
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>CarX Highway Racing APK Unlimited Money: A Review</h1>
|
3 |
-
<p>Do you love racing games? Do you want to experience the thrill of driving on a traffic-packed highway with realistic physics? Do you want to have unlimited money to buy and upgrade your dream cars? If you answered yes to any of these questions, then you should definitely check out <strong>CarX Highway Racing</strong>, one of the best racing games for Android devices.</p>
|
4 |
-
<h2>carx highway racing apk unlimited money</h2><br /><p><b><b>Download File</b> ✏ <a href="https://urlin.us/2uT1Fs">https://urlin.us/2uT1Fs</a></b></p><br /><br />
|
5 |
-
<p>CarX Highway Racing is a game that combines lifelike physics, traffic-packed highways, and a variety of cars and modes to create an immersive and exciting racing experience. You can choose from over 100 cars, each with its own characteristics and customization options, and race against other players or AI opponents in different modes, such as campaign, time attack, police chase, and online multiplayer. You can also enjoy stunning graphics, realistic sounds, and dynamic weather effects that make every race unique.</p>
|
6 |
-
<p>In this article, we will review CarX Highway Racing in detail and show you how to download and install CarX Highway Racing APK Unlimited Money, a modded version that gives you unlimited money, unlocked cars, and no ads. We will also give you some tips and tricks on how to master CarX Highway Racing and win every race. So, without further ado, let's get started!</p>
|
7 |
-
<h2>Features</h2>
|
8 |
-
<p>CarX Highway Racing is not your typical racing game. It has some unique features that make it stand out from other racing games. Here are some of them:</p>
|
9 |
-
<h3>Lifelike physics</h3>
|
10 |
-
<p>CarX Highway Racing uses the same physics engine as CarX Drift Racing 2, which is one of the best drifting games for Android devices. This means that CarX Highway Racing has a realistic and challenging driving experience that requires skill and precision. You have to control your car's speed, acceleration, braking, steering, and drifting, as well as take into account the road conditions, the weather, and the traffic. You can also feel the difference between different cars, as each one has its own weight, power, handling, and traction.</p>
|
11 |
-
<h3>Traffic-packed highways</h3>
|
12 |
-
<p>CarX Highway Racing simulates real-life traffic conditions, which adds more thrill and excitement to the races. You have to avoid crashing into other cars or objects, as well as follow the traffic rules and signs. You also have to deal with different types of traffic, such as trucks, buses, motorcycles, and police cars. Some traffic may help you or hinder you, depending on the situation. For example, you can use trucks to block your opponents or hide from the police, but you can also get stuck behind them or get hit by them.</p>
|
13 |
-
<h3>Variety of cars and modes</h3>
|
14 |
-
<p>CarX Highway Racing offers over 100 cars to choose from, each with its own characteristics and customization options. You can find cars from different categories, such as sports cars, muscle cars, supercars, and classic cars. You can also upgrade and customize your car's performance and appearance, such as engine, transmission, suspension, brakes, tires, paint, vinyls, stickers, and more.</p>
|
15 |
-
<p>carx highway racing mod apk download free<br />
|
16 |
-
carx highway racing hack apk unlimited gold<br />
|
17 |
-
carx highway racing apk obb latest version<br />
|
18 |
-
carx highway racing modded apk all cars unlocked<br />
|
19 |
-
carx highway racing cheats apk unlimited fuel<br />
|
20 |
-
carx highway racing premium apk full version<br />
|
21 |
-
carx highway racing cracked apk unlimited money and gold<br />
|
22 |
-
carx highway racing apk mod menu unlimited everything<br />
|
23 |
-
carx highway racing offline apk no root<br />
|
24 |
-
carx highway racing unlimited money apk rexdl<br />
|
25 |
-
carx highway racing mod apk revdl free download<br />
|
26 |
-
carx highway racing hack apk android 1<br />
|
27 |
-
carx highway racing apk data unlimited cash<br />
|
28 |
-
carx highway racing modded apk unlimited nitro<br />
|
29 |
-
carx highway racing apk pure full unlocked<br />
|
30 |
-
carx highway racing hack apk happymod<br />
|
31 |
-
carx highway racing mod apk android republic<br />
|
32 |
-
carx highway racing unlimited money apk mirror<br />
|
33 |
-
carx highway racing hacked apk latest update<br />
|
34 |
-
carx highway racing mod apk an1.com<br />
|
35 |
-
carx highway racing apk mod unlimited coins and gems<br />
|
36 |
-
carx highway racing hack tool apk no survey<br />
|
37 |
-
carx highway racing modded apk free shopping<br />
|
38 |
-
carx highway racing unlimited money and gold apk download<br />
|
39 |
-
carx highway racing mod apk vip unlocked<br />
|
40 |
-
carx highway racing hack version apk unlimited money and gold<br />
|
41 |
-
carx highway racing modded apk no ads<br />
|
42 |
-
carx highway racing unlimited money and gold mod apk 2023<br />
|
43 |
-
carx highway racing hack online generator apk<br />
|
44 |
-
carx highway racing modded apk all levels unlocked</p>
|
45 |
-
<p>CarX Highway Racing also offers different modes to suit different preferences. You can play the campaign mode, where you have to complete various missions and challenges in different locations and scenarios. You can also play the time attack mode, where you have to race against the clock and beat your own records. You can also play the police chase mode, where you have to escape from the police or chase down criminals. And finally, you can play the online multiplayer mode, where you can race against other players from around the world and compete for rankings and rewards.</p> <h2>How to download and install CarX Highway Racing APK Unlimited Money</h2>
|
46 |
-
<p>If you want to enjoy CarX Highway Racing with unlimited money, unlocked cars, and no ads, you can download and install CarX Highway Racing APK Unlimited Money, a modded version of the game that gives you these benefits. Here is how to do it:</p>
|
47 |
-
<ol>
|
48 |
-
<li>Go to the download page of CarX Highway Racing APK Unlimited Money by clicking <a href="">here</a>.</li>
|
49 |
-
<li>Download the APK file and the OBB file to your device.</li>
|
50 |
-
<li>Enable the installation of apps from unknown sources in your device's settings.</li>
|
51 |
-
<li>Install the APK file by tapping on it.</li>
|
52 |
-
<li>Extract the OBB file to the Android/OBB folder in your device's internal storage.</li>
|
53 |
-
<li>Launch the game and enjoy!</li>
|
54 |
-
</ol>
|
55 |
-
<p>Note: This is a modded version of the game that may not be compatible with the official version or the latest updates. Use it at your own risk and discretion. We are not responsible for any damages or issues that may arise from using this modded version.</p>
|
56 |
-
<h2>Tips and tricks</h2>
|
57 |
-
<p>CarX Highway Racing is a fun and challenging game that requires skill and strategy to win. Here are some tips and tricks that can help you improve your skills and performance in CarX Highway Racing:</p>
|
58 |
-
<h3>Choose the right car for each race</h3>
|
59 |
-
<p>Different cars have different strengths and weaknesses, and you should select the best one for each race based on the terrain, weather, traffic, and opponents. For example, if you are racing on a snowy road, you should choose a car with good traction and stability, such as a 4x4 or an SUV. If you are racing on a sunny highway, you should choose a car with high speed and acceleration, such as a sports car or a supercar. You can also check the stats and ratings of each car before selecting it, such as power, handling, fuel consumption, durability, and popularity.</p>
|
60 |
-
<h3>Use the nitro wisely</h3>
|
61 |
-
<p>Nitro can boost your speed and help you overtake other cars, but it can also drain your fuel and make you lose control if used too much or at the wrong time. You should use nitro sparingly and strategically, such as when you need to catch up with an opponent, escape from the police, or pass through a narrow gap. You should also avoid using nitro when you are turning, braking, or drifting, as it can make you skid or crash. You can refill your nitro by driving fast or performing stunts, such as drifting, jumping, or near-missing.</p>
|
62 |
-
<h3>Avoid collisions and penalties</h3>
|
63 |
-
<p>Collisions can damage your car and slow you down, and penalties can reduce your score and time if you break the rules or hit other cars or objects. You should avoid collisions and penalties by driving carefully and skillfully, as well as following the traffic rules and signs. You should also avoid hitting other cars or objects, such as trucks, buses, motorcycles, police cars, barriers, cones, signs, trees, etc. You can also repair your car by driving through repair stations or using repair kits.</p>
|
64 |
-
<h3>Upgrade and customize your car</h3>
|
65 |
-
<p>Upgrading and customizing your car can improve its performance and appearance, and you can use the unlimited money from the modded version to do so. You can upgrade your car's engine, transmission, suspension, brakes, tires, etc., to increase its power, handling, fuel consumption, durability, etc. You can also customize your car's paint, vinyls, stickers, etc., to change its color, style, and design. You can also use the modded version to unlock all the cars and customize them as you wish.</p>
|
66 |
-
<h2>Conclusion</h2>
|
67 |
-
<p>CarX Highway Racing is a game that offers a realistic and thrilling racing experience on traffic-packed highways. You can choose from over 100 cars, each with its own characteristics and customization options, and race against other players or AI opponents in different modes, such as campaign, time attack, police chase, and online multiplayer. You can also enjoy stunning graphics, realistic sounds, and dynamic weather effects that make every race unique.</p>
|
68 |
-
<p>If you want to have unlimited money, unlocked cars, and no ads, you can download and install CarX Highway Racing APK Unlimited Money, a modded version of the game that gives you these benefits. You can also use some tips and tricks to improve your skills and performance in CarX Highway Racing, such as choosing the right car for each race, using the nitro wisely, avoiding collisions and penalties, and upgrading and customizing your car.</p>
|
69 |
-
<p>So, what are you waiting for? Download CarX Highway Racing APK Unlimited Money now and enjoy the ultimate racing experience on your Android device. You won't regret it!</p>
|
70 |
-
<p>Click <a href="">here</a> to download CarX Highway Racing APK Unlimited Money.</p>
|
71 |
-
<h2>FAQs</h2>
|
72 |
-
<p>Here are some frequently asked questions about CarX Highway Racing APK Unlimited Money:</p>
|
73 |
-
<h3>Q: Is CarX Highway Racing APK Unlimited Money safe to use?</h3>
|
74 |
-
<p>A: CarX Highway Racing APK Unlimited Money is a modded version of the game that has been modified by third-party developers. It is not affiliated with or endorsed by the official developers of CarX Highway Racing. Therefore, it may not be safe to use and may contain viruses or malware that can harm your device or compromise your privacy. Use it at your own risk and discretion.</p>
|
75 |
-
<h3>Q: How do I update CarX Highway Racing APK Unlimited Money?</h3>
|
76 |
-
<p>A: CarX Highway Racing APK Unlimited Money may not be compatible with the latest updates or versions of CarX Highway Racing. Therefore, you may not be able to update it through the Google Play Store or the official website. You may have to wait for the modded version to be updated by its developers or look for another source to download it from.</p>
|
77 |
-
<h3>Q: Can I play CarX Highway Racing APK Unlimited Money online?</h3>
|
78 |
-
<p>A: CarX Highway Racing APK Unlimited Money may not work properly or at all when you try to play it online. You may face issues such as connection errors, lagging, crashing, or banning. Therefore, it is recommended that you play CarX Highway Racing APK Unlimited Money offline or in airplane mode.</p>
|
79 |
-
<h3>Q: Can I use CarX Highway Racing APK Unlimited Money with other mods or cheats?</h3>
|
80 |
-
<p>A: CarX Highway Racing APK Unlimited Money may not be compatible with other mods or cheats that you may have installed on your device or in your game. They may cause conflicts or errors that can affect your gameplay or damage your device. Therefore, it is advised that you use CarX Highway Racing APK Unlimited Money alone or with caution.</p>
|
81 |
-
<h3>Q: Can I share CarX Highway Racing APK Unlimited Money with others?</h3>
|
82 |
-
<p>A: CarX Highway Racing APK Unlimited Money is a modded version of the game that is not authorized or approved by the official developers of CarX Highway Racing. Therefore, it may violate their terms of service or intellectual property rights. Sharing it with others may result in legal actions or penalties against you or them. Therefore, it is suggested that you keep CarX Highway Racing APK Unlimited Money for yourself or share it with discretion.</p><br />
|
83 |
-
<br />
|
84 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Alienvault The Ultimate Solution for Threat Intelligence and Detection.md
DELETED
@@ -1,141 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>What is AlienVault and Why You Need It</h1>
|
3 |
-
<p>If you are looking for a powerful and reliable solution to protect your network from cyber threats, you might have heard of AlienVault. But what is AlienVault exactly, and why do you need it?</p>
|
4 |
-
<h2>alienvault</h2><br /><p><b><b>DOWNLOAD</b> ❤ <a href="https://jinyurl.com/2uNSPS">https://jinyurl.com/2uNSPS</a></b></p><br /><br />
|
5 |
-
<p>AlienVault is a leading provider of cybersecurity solutions that help organizations of all sizes detect, prevent, and respond to cyber attacks. AlienVault offers a unique combination of open threat intelligence, security information and event management (SIEM), and cybersecurity services that enable you to monitor, analyze, and respond to threats in real time.</p>
|
6 |
-
<p>In this article, we will explain what AlienVault is, how it works, what benefits and features it offers, what customers say about it, and how you can get started with it. By the end of this article, you will have a clear understanding of why AlienVault is the best choice for your cybersecurity needs.</p>
|
7 |
-
<h2>AlienVault: The World's First Open Threat Intelligence Community</h2>
|
8 |
-
<p>One of the key components of AlienVault is its Open Threat Exchange (OTX), which is the world's first truly open threat intelligence community. OTX enables private companies, independent security researchers, and government agencies to openly collaborate and share the latest information about emerging threats, attack methods, and malicious actors, promoting greater security across the entire community.</p>
|
9 |
-
<p>alienvault open threat exchange<br />
|
10 |
-
alienvault otx endpoint security<br />
|
11 |
-
alienvault usm anywhere<br />
|
12 |
-
alienvault vs splunk<br />
|
13 |
-
alienvault pricing<br />
|
14 |
-
alienvault siem review<br />
|
15 |
-
alienvault certification<br />
|
16 |
-
alienvault aws integration<br />
|
17 |
-
alienvault azure sentinel<br />
|
18 |
-
alienvault api documentation<br />
|
19 |
-
alienvault at&t cybersecurity<br />
|
20 |
-
alienvault backup and restore<br />
|
21 |
-
alienvault cloud security<br />
|
22 |
-
alienvault compliance reports<br />
|
23 |
-
alienvault dark web monitoring<br />
|
24 |
-
alienvault endpoint detection and response<br />
|
25 |
-
alienvault file integrity monitoring<br />
|
26 |
-
alienvault gartner magic quadrant<br />
|
27 |
-
alienvault honeypot setup<br />
|
28 |
-
alienvault intrusion detection system<br />
|
29 |
-
alienvault jobs<br />
|
30 |
-
alienvault kubernetes<br />
|
31 |
-
alienvault log management<br />
|
32 |
-
alienvault mssp partner program<br />
|
33 |
-
alienvault network monitoring<br />
|
34 |
-
alienvault otx pulses<br />
|
35 |
-
alienvault otx directconnect api<br />
|
36 |
-
alienvault otx vs threatconnect<br />
|
37 |
-
alienvault otx vs virustotal<br />
|
38 |
-
alienvault otx vs mitre att&ck<br />
|
39 |
-
alienvault product comparison matrix<br />
|
40 |
-
alienvault qualys integration<br />
|
41 |
-
alienvault ransomware detection<br />
|
42 |
-
alienvault sensor deployment guide<br />
|
43 |
-
alienvault threat intelligence feed url<br />
|
44 |
-
alienvault unified security management platform<br />
|
45 |
-
alienvault user activity monitoring<br />
|
46 |
-
alienvault vulnerability assessment and remediation<br />
|
47 |
-
alienvault web application firewall integration<br />
|
48 |
-
alienvault windows event log collection configuration guide</p>
|
49 |
-
<h3>How AlienVault Works</h3>
|
50 |
-
<p>AlienVault works by leveraging the power of OTX and its own security products to provide you with comprehensive and up-to-date threat intelligence that helps you detect and respond to threats faster and more effectively. Here are some of the main features of how AlienVault works:</p>
|
51 |
-
<h4>Open Threat Exchange (OTX)</h4>
|
52 |
-
<ul>
|
53 |
-
<li>OTX is a free platform that allows anyone in the security community to contribute, discuss, research, validate, and share threat data.</li>
|
54 |
-
<li>OTX collects over 20 million threat indicators daily from over 200,000 global participants who investigate emerging threats in the wild.</li>
|
55 |
-
<li>OTX automatically extracts indicators of compromise (IOCs) from blogs, threat reports, emails, PCAPs, and more.</li>
|
56 |
-
<li>OTX allows you to join and create specialized groups, including private groups, to share threat intelligence with specific audiences.</li>
|
57 |
-
<li>OTX allows you to submit files and URLs for free malware analysis within Alien Labs OTX sandbox.</li>
|
58 |
-
<li>OTX allows you to quickly identify if your endpoints have been compromised in major cyber attacks using OTX Endpoint Security.</li>
|
59 |
-
<li>OTX allows you to synchronize OTX threat intelligence with other security products via the DirectConnect API, SDK, and STIX/TAXII (see the API sketch after this list).</li>
|
60 |
-
</ul>
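<p>To make the DirectConnect option concrete, here is a minimal TypeScript sketch that pulls the pulses an account subscribes to over the OTX HTTP API. This is an illustration under assumptions rather than AlienVault's official SDK: the /api/v1/pulses/subscribed endpoint and the X-OTX-API-KEY header follow the public OTX documentation at the time of writing, while the API key, the limit value, and the fields read from each pulse are placeholders you should verify against your own account.</p>
<pre><code>// sketch: pull subscribed OTX pulses over the DirectConnect HTTP API
const OTX_BASE = "https://otx.alienvault.com/api/v1";
const OTX_API_KEY = "YOUR-OTX-API-KEY"; // placeholder: use the key from your OTX settings

async function fetchSubscribedPulses() {
  // Ask OTX for the pulses this account is subscribed to.
  const res = await fetch(OTX_BASE + "/pulses/subscribed?limit=10", {
    headers: { "X-OTX-API-KEY": OTX_API_KEY },
  });
  if (!res.ok) throw new Error("OTX request failed: " + res.status);
  const body = await res.json();
  // Each pulse carries a name and a list of indicators of compromise (IOCs).
  for (const pulse of body.results) {
    console.log(pulse.name + ": " + pulse.indicators.length + " indicators");
  }
}

fetchSubscribedPulses();
</code></pre>
<p>The same pattern extends to the other DirectConnect endpoints; a product consuming this feed would typically page through the results and load the indicators into its own detection rules.</p>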
|
61 |
-
<h4>OTX Endpoint Security</h4>
|
62 |
-
<ul>
|
63 |
-
<li>OTX Endpoint Security is a free service that natively uses the community-powered threat intelligence of OTX to scan your endpoints for known IOCs.</li>
|
64 |
-
<li>OTX Endpoint Security uses the same agent-based approach as expensive endpoint security tools and DIY open source agents without the expense, complexity, or guesswork.</li>
|
65 |
-
<li>OTX Endpoint Security is available to any registered OTX user. To get started, you just need to download and install the OTX agent on the Windows or Linux devices you want to monitor.</li>
|
66 |
-
<li>OTX Endpoint Security allows you to launch a query on any endpoint from OTX by selecting a pre-defined query that looks for IOCs in one or more categories, such as processes, registry keys, files, or network connections.</li>
|
67 |
-
<li>OTX Endpoint Security allows you to view the results of the query in OTX and see if any of the endpoints have been compromised by known threats.</li>
|
68 |
-
<li>OTX Endpoint Security allows you to take action on the compromised endpoints by isolating them from the network, killing malicious processes, deleting malicious files, or blocking malicious network connections.</li>
|
69 |
-
</ul>
|
70 |
-
<h3>AlienVault: The Best Solution for Security Information and Event Management (SIEM)</h3>
|
71 |
-
<p>Another key component of AlienVault is its SIEM solution, which is designed to help you collect, correlate, analyze, and act on security data from various sources across your network. AlienVault offers two versions of its SIEM solution: AlienVault OSSIM and AlienVault USM.</p>
|
72 |
-
<h4>AlienVault OSSIM</h4>
|
73 |
-
<ul>
|
74 |
-
<li>AlienVault OSSIM is the world's most widely used open source SIEM solution, with over 500,000 downloads and 195,000 active users.</li>
|
75 |
-
<li>AlienVault OSSIM provides you with the basic security capabilities you need to monitor your network, such as asset discovery, vulnerability assessment, intrusion detection, behavioral monitoring, and event correlation.</li>
|
76 |
-
<li>AlienVault OSSIM is free to download and use for any purpose. However, it does not include any support or maintenance services from AlienVault.</li>
|
77 |
-
<li>AlienVault OSSIM is ideal for security enthusiasts, researchers, students, and small organizations who want to learn about SIEM and get started with basic security monitoring.</li>
|
78 |
-
</ul>
|
79 |
-
<h4>AlienVault USM</h4>
|
80 |
-
<ul>
|
81 |
-
<li>AlienVault USM is the commercial version of AlienVault OSSIM, which provides you with the advanced security capabilities you need to protect your network from sophisticated threats.</li>
|
82 |
-
<li>AlienVault USM includes all the features of AlienVault OSSIM, plus additional features such as threat intelligence updates from OTX and Alien Labs, log management and retention, compliance reporting and management, orchestration and automation, cloud monitoring and integration, and more.</li>
|
83 |
-
<li>AlienVault USM comes with full support and maintenance services from AlienVault, including 24/7 technical support, product updates and upgrades, training and certification, and professional services.</li>
|
84 |
-
<li>AlienVault USM is ideal for medium to large organizations who need a comprehensive and scalable SIEM solution that can handle complex and dynamic environments.</li>
|
85 |
-
</ul>
|
86 |
-
<h3>AlienVault: The Trusted Partner for Cybersecurity Services</h3>
|
87 |
-
<p>Besides its threat intelligence and SIEM solutions, AlienVault also offers a range of cybersecurity services that can help you enhance your security posture and achieve your security goals. These services include:</p>
|
88 |
-
<h4>AlienVault Professional Services</h4>
|
89 |
-
<ul>
|
90 |
-
<li>AlienVault Professional Services are designed to help you get the most out of your AlienVault products and solutions. These services include installation and configuration, migration and upgrade, customization and integration, health check and optimization, incident response and forensics, and more.</li>
|
91 |
-
<li>AlienVault Professional Services are delivered by certified AlienVault experts who have extensive experience and knowledge in cybersecurity best practices and industry standards.</li>
|
92 |
-
<li>AlienVault Professional Services are available on-demand or as part of a subscription plan. You can choose from different service levels depending on your needs and budget.</li>
|
93 |
-
</ul>
|
94 |
-
<h4>AlienVault Managed Security Services</h4>
|
95 |
-
<ul>
|
96 |
-
<li>AlienVault Managed Security Services are designed to help you outsource your security operations to AlienVault's team of security analysts who will monitor, manage, and respond to threats on your behalf. These services include managed detection and response (MDR), managed compliance (MC), managed vulnerability scanning (MVS), managed log review (MLR), managed threat hunting (MTH), and more.</li>
|
97 |
-
<li>AlienVault Managed Security Services are powered by AlienVault USM's advanced technology and OTX's rich threat intelligence. You will get access to a dedicated portal where you can view your security status, alerts, reports, recommendations, and actions.</li>
|
98 |
-
<li>AlienVault Managed Security Services are available as a monthly or annual subscription plan. You can choose from different service tiers depending on your needs and budget.</li>
|
99 |
-
</ul>
|
100 |
-
<h2>AlienVault: The Benefits and Features You Can Expect</h2>
|
101 |
-
<p>Now that you know what AlienVault is and how it works, let's take a look at some of the benefits and features you can expect from using AlienVault for your cybersecurity needs. Here are some of the main ones:</p>
|
102 |
-
<h3>Comprehensive and Up-to-Date Threat Intelligence</h3>
|
103 |
-
<p>One of the biggest advantages of AlienVault is that it provides you with comprehensive and up-to-date threat intelligence that helps you stay ahead of the evolving threat landscape. AlienVault's threat intelligence is derived from multiple sources, including OTX, Alien Labs, third-party feeds, and your own data. AlienVault's threat intelligence is constantly updated and enriched with contextual information, such as threat actors, tactics, techniques, and procedures (TTPs), indicators of compromise (IOCs), and recommended actions. AlienVault's threat intelligence enables you to quickly identify and prioritize the most relevant and critical threats to your network and respond accordingly.</p>
|
104 |
-
<h3>Easy and Flexible Deployment and Integration</h3>
|
105 |
-
<p>Another benefit of AlienVault is that it is easy and flexible to deploy and integrate with your existing infrastructure and security tools. AlienVault supports various deployment options, including on-premises, cloud, hybrid, or virtual appliances. AlienVault also supports various integration options, including native integrations with popular cloud platforms, such as AWS, Azure, Google Cloud, and Office 365, as well as integrations with other security products, such as firewalls, antivirus, endpoint protection, and more. AlienVault's deployment and integration capabilities allow you to extend your visibility and coverage across your entire network and leverage your existing investments in security.</p>
|
106 |
-
<h3>Affordable and Scalable Pricing and Licensing</h3>
|
107 |
-
<p>A third benefit of AlienVault is that it offers affordable and scalable pricing and licensing models that suit your needs and budget. AlienVault's pricing and licensing models are based on the number of assets you want to monitor, not on the volume of data you generate or consume. This means that you only pay for what you need and use, without worrying about data limits or overages. AlienVault's pricing and licensing models also allow you to scale up or down as your network grows or changes, without compromising your security or performance.</p>
|
108 |
-
<h2>AlienVault: The Customer Reviews and Testimonials You Should Know</h2>
|
109 |
-
<p>So far, we have discussed what AlienVault is, how it works, and what benefits and features it offers. But don't just take our word for it. Here are some of the customer reviews and testimonials you should know about AlienVault:</p>
|
110 |
-
<h3>What Customers Love About AlienVault</h3>
|
111 |
-
<p>Here is some of the positive feedback that customers have given about AlienVault:</p>
|
112 |
-
<ul>
|
113 |
-
<li>"AlienVault has been a game-changer for us. It has given us the visibility and insight we need to protect our network from threats. It has also saved us a lot of time and money by simplifying our security operations." - IT Manager at a Manufacturing Company</li>
|
114 |
-
<li>"AlienVault is a great solution for small to medium businesses who need a comprehensive SIEM solution that is easy to use and affordable. It has everything you need in one platform: threat intelligence, asset discovery, vulnerability assessment, intrusion detection, behavioral monitoring, event correlation, log management, compliance reporting, orchestration and automation, cloud monitoring, and more." - Security Analyst at a Financial Services Company</li>
|
115 |
-
<li>"AlienVault is the best thing that ever happened to us. It has helped us improve our security posture and compliance status significantly. It has also enabled us to collaborate with other security professionals in the OTX community and learn from their experiences." - CISO at a Healthcare Company</li>
|
116 |
-
</ul>
|
117 |
-
<h3>What Customers Wish AlienVault Could Improve</h3>
|
118 |
-
<p>Here is some of the negative feedback that customers have given about AlienVault:</p>
|
119 |
-
<ul>
|
120 |
-
<li>"AlienVault could improve its user interface and dashboard. It can be confusing and overwhelming at times. It could also provide more customization options for reports and alerts." - IT Director at an Education Institution</li>
|
121 |
-
<li>"AlienVault could improve its support for newer technologies and platforms. It can be slow to update its integrations with some of the latest cloud services and security tools." - Security Engineer at a Technology Company</li>
|
122 |
-
<li>"AlienVault could improve its documentation and training resources. It can be hard to find the information you need or get the answers you want. It could also offer more online courses and certifications for users." - Security Consultant at a Professional Services Company</li>
|
123 |
-
</ul>
|
124 |
-
<h2>AlienVault: The Conclusion and Call to Action</h2>
|
125 |
-
<p>In conclusion, AlienVault is a powerful and reliable solution that can help you protect your network from cyber threats. AlienVault offers a unique combination of open threat intelligence, security information and event management (SIEM), and cybersecurity services that enable you to monitor, analyze, and respond to threats in real time. AlienVault is easy and flexible to deploy and integrate, affordable and scalable to use, and comprehensive and up-to-date in its threat intelligence. AlienVault has received positive reviews and testimonials from thousands of customers who have improved their security posture and compliance status with AlienVault.</p>
|
126 |
-
<p>If you are interested in trying out AlienVault for yourself, you can request a free trial or a live demo from their website. You can also download AlienVault OSSIM or join OTX for free. Alternatively, you can contact AlienVault's sales team or find a partner near you to get more information and assistance.</p>
|
127 |
-
<p>Don't wait any longer. Start your journey with AlienVault today and see how it can help you protect your network from cyber threats.</p>
|
128 |
-
<h2>AlienVault: The FAQs</h2>
|
129 |
-
<p>Here are some of the frequently asked questions (FAQs) about AlienVault:</p>
|
130 |
-
<h3>What is the difference between AlienVault OSSIM and AlienVault USM?</h3>
|
131 |
-
<p>AlienVault OSSIM is the open source version of AlienVault USM, which provides basic security capabilities for network monitoring. AlienVault USM is the commercial version of AlienVault OSSIM, which provides advanced security capabilities for threat detection and response.</p>
|
132 |
-
<h3>How much does AlienVault cost?</h3>
|
133 |
-
<p>AlienVault's pricing depends on the number of assets you want to monitor and the service level you choose. You can request a quote from their website or contact their sales team for more details.</p>
|
134 |
-
<h3>How can I get started with AlienVault?</h3>
|
135 |
-
<p>You can get started with AlienVault by requesting a free trial or a live demo from their website. You can also download AlienVault OSSIM or join OTX for free. Alternatively, you can contact AlienVault's sales team or find a partner near you to get more information and assistance.</p>
|
136 |
-
<h3>What are the system requirements for AlienVault?</h3>
|
137 |
-
<p>AlienVault's system requirements vary depending on the deployment option and the product version you choose. You can find the detailed system requirements on their website or contact their support team for more guidance.</p>
|
138 |
-
<h3>Where can I find more resources and support for AlienVault?</h3>
|
139 |
-
<p>You can find more resources and support for AlienVault on their website, where you can access their documentation, knowledge base, forums, blog, webinars, videos, podcasts, and more. You can also contact their support team via phone, email, chat, or ticket system.</p><br />
|
140 |
-
<br />
|
141 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Download TikTok Videos Without Watermark in HD Resolution - Best TikTok Saver.md
DELETED
@@ -1,113 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download TikTok Videos Without Watermark in HD</h1>
|
3 |
-
<p>TikTok is one of the most popular social media platforms that allows users to create and share short videos with millions of people around the world. However, if you want to download your favorite TikTok videos to your device or share them on other platforms, you might encounter some problems. For example, you might notice that the downloaded videos have a watermark or logo that covers part of the screen. Or you might find that the video quality is low or blurry. Or you might want to edit your video to make it more appealing and engaging.</p>
|
4 |
-
<h2>download tiktok without watermark hd</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://jinyurl.com/2uNJRu">https://jinyurl.com/2uNJRu</a></b></p><br /><br />
|
5 |
-
<p>In this article, we will show you how to download TikTok videos without watermark in HD quality using some free online tools. We will also give you some tips on how to improve and edit your TikTok videos for more engagement. By following these steps, you will be able to enjoy your TikTok videos without any limitations or restrictions.</p>
|
6 |
-
<h2>Why Download TikTok Videos Without Watermark?</h2>
|
7 |
-
<p>There are many reasons why you might want to download TikTok videos without watermark. Here are some of them:</p>
|
8 |
-
<ul>
|
9 |
-
<li><strong>Better quality:</strong> The watermark or logo that appears on the downloaded TikTok videos can reduce the quality and clarity of the video. It can also distract or annoy the viewers who want to focus on the content. By downloading TikTok videos without watermark, you can get a better viewing experience.</li>
|
10 |
-
<li><strong>No logo:</strong> The watermark or logo that appears on the downloaded TikTok videos can also infringe on the intellectual property rights of the original creators. It can also make it harder for you to claim ownership or credit for your own work. By downloading TikTok videos without watermark, you can respect the rights of the creators and protect your own reputation.</li>
|
11 |
-
<li><strong>More creative freedom:</strong> The watermark or logo that appears on the downloaded TikTok videos can also limit your creative freedom. It can prevent you from editing or modifying your video as you wish. It can also make it difficult for you to share your video on other platforms or channels. By downloading TikTok videos without watermark, you can have more control over your video and use it for any purpose.</li>
|
12 |
-
</ul>
|
13 |
-
<h2>How to Download TikTok Videos Without Watermark on Mobile Phone</h2>
|
14 |
-
<p>If you want to download TikTok videos without watermark on your mobile phone, you can use a web-based tool called ssstik.io. This tool allows you to download TikTok videos in HD quality without any watermark or logo. You can also choose to download only the audio or the video of the TikTok video. Here are the steps to use ssstik.io on your mobile phone:</p>
<ol>
<li>Open the TikTok app on your mobile phone and find the video that you want to download.</li>
<li>Tap on the share icon and select "Copy Link" to copy the URL of the video.</li>
<li>Open a web browser on your mobile phone and go to ssstik.io.</li>
<li>Paste the URL of the video in the input box and tap on "Download".</li>
<li>Wait for a few seconds until the tool processes the video and generates the download links.</li>
<li>Tap on "Download MP4" to download the video without watermark, or tap on "Download MP3" to download only the audio of the video.</li>
<li>Save the file to your device and enjoy your TikTok video without watermark.</li>
</ol>
<p>Here are some screenshots of how to use ssstik.io on your mobile phone: <img src="https://i.imgur.com/9XJ6w2f.png" alt="Screenshot of TikTok app with share icon" width="300"></p>
|
15 |
-
<img src="https://i.imgur.com/8yZp0mE.png" alt="Screenshot of ssstik.io with input box and download button" width="300">
|
16 |
-
<img src="https://i.imgur.com/7Qf0d5j.png" alt="Screenshot of ssstik.io with download links" width="300">
|
17 |
-
Here is a table that compares the features of ssstik.io with other TikTok video downloaders:

| Feature | ssstik.io | Other TikTok Video Downloaders |
| --- | --- | --- |
| Download TikTok videos without watermark | Yes | No |
| Download TikTok videos in HD quality | Yes | No |
| Download only audio or video of TikTok videos | Yes | No |
| Support multiple platforms (Android, iOS, Windows, Mac, Linux) | Yes | No |
| Free and easy to use | Yes | No |

As you can see, ssstik.io is one of the best tools to download TikTok videos without watermark on your mobile phone. It is fast, simple, and reliable. You can use it anytime and anywhere to enjoy your TikTok videos without any limitations or restrictions.
<h2>How to Download TikTok Videos Without Watermark on PC</h2>
|
18 |
-
<p>If you want to download TikTok videos without watermark on your PC, you can use another web-based tool called SnapTik.App. This tool also allows you to download TikTok videos in HD quality without any watermark or logo. You can also choose to download only the audio or the video of the TikTok video. Here are the steps to use SnapTik.App on your PC:</p>
|
19 |
-
<ol>
<li>Open a web browser on your PC, go to TikTok.com, and find the video that you want to download.</li>
<li>Copy the URL of the video from the address bar of your browser.</li>
<li>Open another tab on your browser and go to SnapTik.App.</li>
<li>Paste the URL of the video in the input box and click on "Download".</li>
<li>Wait for a few seconds until the tool processes the video and generates the download links.</li>
<li>Click on "Download MP4" to download the video without watermark, or click on "Download MP3" to download only the audio of the video.</li>
<li>Save the file to your PC and enjoy your TikTok video without watermark.</li>
</ol>
<p>Here are some screenshots of how to use SnapTik.App on your PC: <img src="https://i.imgur.com/0wZy9oX.png" alt="Screenshot of TikTok.com with URL of video" width="600"></p>
|
20 |
-
<img src="https://i.imgur.com/6sFgq1n.png" alt="Screenshot of SnapTik.App with input box and download button" width="600">
|
21 |
-
<img src="https://i.imgur.com/8f0J7Qm.png" alt="Screenshot of SnapTik.App with download links" width="600">
|
22 |
-
Here is a table that compares the features of SnapTik.App with other TikTok video downloaders:

| Feature | SnapTik.App | Other TikTok Video Downloaders |
| --- | --- | --- |
| Download TikTok videos without watermark | Yes | No |
| Download TikTok videos in HD quality | Yes | No |
| Download only audio or video of TikTok videos | Yes | No |
| Support multiple platforms (Android, iOS, Windows, Mac, Linux) | Yes | No |
| Free and easy to use | Yes | No |

As you can see, SnapTik.App is another great tool to download TikTok videos without watermark on your PC. It is fast, simple, and reliable. You can use it anytime and anywhere to enjoy your TikTok videos without any limitations or restrictions.
<h2>How to Improve the Quality of TikTok Videos</h2>
|
23 |
-
<p>Now that you know how to download TikTok videos without watermark, you might want to improve the quality of your videos. The quality of TikTok videos depends on several factors, such as resolution, file size, file format, and length. Here are some of them:</p>
|
24 |
-
<p>How to download tiktok videos without watermark in hd quality<br />
|
25 |
-
Best tiktok downloader online no watermark hd mp4<br />
|
26 |
-
Download tiktok video without logo hd free<br />
|
27 |
-
Tiktok video download without watermark app for android<br />
|
28 |
-
Save tiktok videos without watermark iphone hd<br />
|
29 |
-
Tiktok no watermark downloader chrome extension hd<br />
|
30 |
-
Download tiktok video without watermark online free hd<br />
|
31 |
-
Tiktok video download mp4 hd no watermark<br />
|
32 |
-
Tiktok video saver without watermark apk hd<br />
|
33 |
-
Download tiktok video without watermark pc hd<br />
|
34 |
-
Tiktok video download without watermark website hd<br />
|
35 |
-
Tiktok video download without watermark ios hd<br />
|
36 |
-
Download tiktok video without watermark mac hd<br />
|
37 |
-
Tiktok video download without watermark software hd<br />
|
38 |
-
Tiktok video download without watermark reddit hd<br />
|
39 |
-
Download tiktok video without watermark 1080p hd<br />
|
40 |
-
Tiktok video download without watermark 4k hd<br />
|
41 |
-
Download tiktok video without watermark and sound hd<br />
|
42 |
-
Tiktok video download without watermark and username hd<br />
|
43 |
-
Download tiktok video without watermark and music hd<br />
|
44 |
-
Tiktok video download without watermark and caption hd<br />
|
45 |
-
Download tiktok video without watermark and duet hd<br />
|
46 |
-
Tiktok video download without watermark and filter hd<br />
|
47 |
-
Download tiktok video without watermark and sticker hd<br />
|
48 |
-
Tiktok video download without watermark and effect hd<br />
|
49 |
-
Download tiktok video without watermark with link hd<br />
|
50 |
-
Tiktok video download without watermark with ssstik.io hd<br />
|
51 |
-
Download tiktok video without watermark with snaptik.app hd<br />
|
52 |
-
Tiktok video download without watermark with fetchtik.com hd<br />
|
53 |
-
Download tiktok video without watermark with musicallydown.com hd<br />
|
54 |
-
Tiktok video download without watermark with keepvid.pro hd<br />
|
55 |
-
Download tiktok video without watermark with ttdownloader.com hd<br />
|
56 |
-
Tiktok video download without watermark with expertsphp.com hd<br />
|
57 |
-
Download tiktok video without watermark with ttdown.org hd<br />
|
58 |
-
Tiktok video download without watermark with savefrom.net hd<br />
|
59 |
-
Download tiktok video without watermark with y2mate.com hd<br />
|
60 |
-
Tiktok video download without watermark with 9xbuddy.com hd<br />
|
61 |
-
Download tiktok video without watermark with alltomp3.org hd<br />
|
62 |
-
Tiktok video download without watermark with clipconverter.cc hd<br />
|
63 |
-
Download tiktok video without watermark with vidpaw.com hd</p>
|
64 |
-
<ul>
|
65 |
-
<li><strong>Resolution:</strong> The resolution of a video is the number of pixels that make up the image. The higher the resolution, the clearer and sharper the video. However, higher resolution also means larger file size and more bandwidth consumption. The optimal resolution for TikTok videos is 1080p (1920 x 1080 pixels), which is also known as HD or high definition. To achieve this resolution, you need to use a device that supports HD recording, such as a smartphone or a camera. You can also adjust the resolution settings on your device or on the TikTok app before recording or uploading your video.</li>
|
66 |
-
<li><strong>File size:</strong> The file size of a video is the amount of space that it occupies on your device or on the internet. The larger the file size, the more storage and data usage it requires. However, larger file size also means higher quality and less compression. The optimal file size for TikTok videos is between 10 MB and 50 MB. To achieve this file size, you need to balance the resolution, length, and format of your video. You can also use a video compressor tool to reduce the file size of your video without losing much quality.</li>
|
67 |
-
<li><strong>File format:</strong> The file format of a video is the type of file that it is saved as. The file format determines how the video is encoded, decoded, and played. Different file formats have different advantages and disadvantages in terms of quality, compatibility, and performance. The optimal file format for TikTok videos is MP4 (MPEG-4 Part 14), which is a widely used and supported format that offers high quality and low file size. To achieve this file format, you need to use a device or an app that supports MP4 recording or conversion. You can also use a video converter tool to change the file format of your video to MP4.</li>
|
68 |
-
<li><strong>Length:</strong> The length of a video is the duration or time that it lasts. The longer the video, the more content and information it can convey. However, longer video also means larger file size and more attention span required. The optimal length for TikTok videos is between 15 seconds and 60 seconds. To achieve this length, you need to plan your content and script before recording or editing your video. You can also use a video trimmer tool to cut or shorten your video to the desired length.</li>
|
69 |
-
</ul>
|
70 |
-
<p>By following these tips, you can improve the quality of your TikTok videos and make them more appealing and enjoyable for yourself and your audience.</p>
|
71 |
-
Here are some screenshots and examples of high-quality and low-quality TikTok videos: <img src="https://i.imgur.com/1Q6wZ8F.png" alt="Screenshot of high-quality TikTok video with HD resolution, small file size, MP4 format, and 15 seconds length" width="300">
|
72 |
-
<img src="https://i.imgur.com/9q6Xf5T.png" alt="Screenshot of low-quality TikTok video with low resolution, large file size, unknown format, and 60 seconds length" width="300">
|
73 |
-
<h2>How to Edit TikTok Videos for More Engagement</h2>
|
74 |
-
<p>Besides improving the quality of your TikTok videos, you might also want to edit them for more engagement. Editing your TikTok videos can help you attract more views, likes, comments, and followers by making your videos more interesting, creative, and unique. Here are some suggestions and tools for editing your TikTok videos:</p>
|
75 |
-
<ul>
|
76 |
-
<li><strong>Add text:</strong> Adding text to your TikTok videos can help you convey your message, highlight your keywords, or add captions or subtitles. You can use the built-in text editor on the TikTok app to add text to your videos. You can also use other apps or tools such as InShot, Vont, or Kapwing to add text to your videos with more options and effects.</li>
|
77 |
-
<li><strong>Add animation:</strong> Adding animation to your TikTok videos can help you create motion graphics, transitions, or stickers that make your videos more dynamic and fun. You can use the built-in animation features on the TikTok app to add animation to your videos. You can also use other apps or tools such as Alight Motion, Funimate, or Canva to add animation to your videos with more options and effects.</li>
|
78 |
-
<li><strong>Add music <li><strong>Add music:</strong> Adding music to your TikTok videos can help you create a mood, a theme, or a rhythm that matches your content. You can use the built-in music library on the TikTok app to add music to your videos. You can also use other apps or tools such as Lomotif, BeatSync, or Splice to add music to your videos with more options and effects.</li>
|
79 |
-
<li><strong>Add voiceover:</strong> Adding voiceover to your TikTok videos can help you narrate, explain, or comment on your content. You can use the built-in voiceover feature on the TikTok app to add voiceover to your videos. You can also use other apps or tools such as Voice Recorder, Audacity, or Filmora to add voiceover to your videos with more options and effects.</li>
|
80 |
-
<li><strong>Add stickers:</strong> Adding stickers to your TikTok videos can help you decorate, personalize, or express yourself on your content. You can use the built-in sticker library on the TikTok app to add stickers to your videos. You can also use other apps or tools such as PicsArt, Giphy, or Sticker Maker to add stickers to your videos with more options and effects.</li>
|
81 |
-
<li><strong>Add transitions:</strong> Adding transitions to your TikTok videos can help you create smooth and seamless changes between different scenes or clips. You can use the built-in transition effects on the TikTok app to add transitions to your videos. You can also use other apps or tools such as VivaVideo, KineMaster, or PowerDirector to add transitions to your videos with more options and effects.</li>
|
82 |
-
</ul>
<p>By following these suggestions, you can edit your TikTok videos for more engagement and make them more interesting, creative, and unique for yourself and your audience.</p>
<p>Here are some screenshots and examples of edited and unedited TikTok videos:</p>
<img src="https://i.imgur.com/6Z3Ys9T.png" alt="Screenshot of unedited TikTok video with no text, animation, music, voiceover, stickers, or transitions" width="300">
<img src="https://i.imgur.com/4xq0J8f.png" alt="Screenshot of edited TikTok video with text, animation, music, voiceover, stickers, and transitions" width="300">
<h2>Conclusion</h2>
<p>In conclusion, downloading TikTok videos without watermark is easy and convenient with free online tools such as ssstik.io and SnapTik.App. These tools let you download TikTok videos in HD quality without any watermark or logo, and you can choose to download only the audio or only the video. Moreover, improving and editing your TikTok videos can enhance their quality and engagement: adjust the resolution, file size, file format, and length, and add text, animation, music, voiceover, stickers, and transitions.</p>
<p>By following these steps, you will be able to enjoy your TikTok videos without any limitations or restrictions, and to create more appealing and engaging TikTok videos for yourself and your audience. So what are you waiting for? Start downloading, improving, and editing your TikTok videos without watermark today!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about downloading, improving, and editing TikTok videos without watermark:</p>
<h3>Q: Is it legal to download TikTok videos without watermark?</h3>
<p>A: It depends on the source and purpose of the video. If the video is public and does not contain any copyrighted material or personal information, you can download it for personal use or fair use. However, if the video is private or contains protected content or data, you need the permission of the owner or creator before downloading it. You also need to respect the terms and conditions of TikTok and of the tools that you use to download the videos. Do not download, distribute, or monetize any TikTok videos without proper authorization or consent.</p>
<h3>Q: How can I download TikTok videos without watermark in bulk?</h3>
<p>A: If you want to download multiple TikTok videos without watermark at once, you can use some tools that support batch downloading. For example, you can use 4K Video Downloader or Allavsoft to download TikTok videos without watermark in bulk. These tools allow you to paste multiple URLs of TikTok videos and download them in HD quality without any watermark or logo. You can also choose to download only the audio or the video of the TikTok videos.</p>
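<p>If you prefer scripting the batch, the sketch below loops over a list of URLs with the open-source command-line downloader yt-dlp. This is an alternative to the GUI tools above, and it assumes yt-dlp is installed and currently supports the links in question:</p>

```python
import subprocess

# Placeholder URLs; replace with the TikTok links you are allowed to download.
urls = [
    "https://www.tiktok.com/@user/video/111",
    "https://www.tiktok.com/@user/video/222",
]

for url in urls:
    # "-o" sets the output file name template; "%(id)s" expands to the video id.
    subprocess.run(["yt-dlp", "-o", "%(id)s.%(ext)s", url], check=True)
```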
<h3>Q: How can I download TikTok videos without watermark with sound?</h3>
<p>A: If you want to download TikTok videos without watermark with sound, you need to make sure that the video has sound in the first place. Some TikTok videos are muted or have no sound by default. You can check the sound icon on the bottom right corner of the video to see if it has sound or not. If the video has sound, you can use any of the tools mentioned above to download it without watermark with sound. If the video has no sound, you can either add your own sound using a video editor tool or find another video that has sound.</p>
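<p>Adding your own sound can also be scripted. The sketch below muxes an audio track into a silent clip with ffmpeg (assumed installed; file names are placeholders):</p>

```python
import subprocess

# Copy the video stream as-is, take audio from the second input,
# and stop at the shorter of the two ("-shortest").
subprocess.run(
    ["ffmpeg", "-y", "-i", "silent.mp4", "-i", "track.mp3",
     "-map", "0:v", "-map", "1:a", "-c:v", "copy", "-shortest", "out.mp4"],
    check=True,
)
```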
<h3>Q: How can I download TikTok videos without watermark on iPhone?</h3>
<p>A: If you want to download TikTok videos without watermark on iPhone, you can use the same method as on Android: ssstik.io. However, you need to install an app called Documents by Readdle on your iPhone first. This app allows you to save and manage files on your iPhone. After installing the app, follow these steps:</p>
<ol>
<li>Open the TikTok app on your iPhone and find the video that you want to download.</li>
<li>Tap on the share icon and select "Copy Link" to copy the URL of the video.</li>
<li>Open Documents by Readdle and tap on the browser icon in the bottom right corner.</li>
<li>Go to ssstik.io, paste the URL of the video in the input box, and tap on "Download".</li>
<li>Wait a few seconds while the tool processes the video and generates the download links.</li>
<li>Tap on "Download MP4" to download the video without watermark, or "Download MP3" to download only the audio.</li>
<li>Tap on "Done" and go to the Downloads folder in Documents by Readdle.</li>
<li>Tap and hold the file that you downloaded and select "Share".</li>
<li>Select "Save Video" or "Save to Files" to save the file to your iPhone and enjoy your TikTok video without watermark.</li>
</ol>
<p>Here are some screenshots of how to use Documents by Readdle to download TikTok videos without watermark on iPhone:</p>
<img src="https://i.imgur.com/0Z7Q3nD.png" alt="Screenshot of Documents by Readdle app with browser icon" width="300">
<img src="https://i.imgur.com/8yZp0mE.png" alt="Screenshot of ssstik.io with input box and download button" width="300">
<img src="https://i.imgur.com/7Qf0d5j.png" alt="Screenshot of ssstik.io with download links" width="300">
<img src="https://i.imgur.com/4nqXs8R.png" alt="Screenshot of Documents by Readdle app with Downloads folder and Share option" width="300">
<img src="https://i.imgur.com/9wY2g1D.png" alt="Screenshot of Save Video or Save to Files option on iPhone" width="300">
<p>As you can see, Documents by Readdle is a useful app that can help you download TikTok videos without watermark on iPhone. It is free, easy, and reliable, and you can use it anytime and anywhere to enjoy your TikTok videos without any limitations or restrictions.</p>
<h3>Q: How can I download TikTok videos without watermark on Mac?</h3>
<p>A: If you want to download TikTok videos without watermark on Mac, you can use the same method as on PC: SnapTik.App. However, you need to install a browser extension called Video Downloader Plus on your Mac first. This extension allows you to download videos from any website with one click. After installing the extension, follow these steps:</p>
<ol>
<li>Open a web browser on your Mac, go to TikTok.com, and find the video that you want to download.</li>
<li>Copy the URL of the video from the address bar of your browser.</li>
<li>Open another tab in your browser and go to SnapTik.App.</li>
<li>Paste the URL of the video in the input box and click on "Download".</li>
<li>Wait a few seconds while the tool processes the video and generates the download links.</li>
<li>Click on "Download MP4" to download the video without watermark, or "Download MP3" to download only the audio.</li>
<li>Click on the Video Downloader Plus icon in the top right corner of your browser and select the file that you downloaded.</li>
<li>Save the file to your Mac and enjoy your TikTok video without watermark.</li>
</ol>
<p>Here are some screenshots of how to use Video Downloader Plus to download TikTok videos without watermark on Mac:</p>
<img src="https://i.imgur.com/0wZy9oX.png" alt="Screenshot of TikTok.com with URL of video" width="600">
<img src="https://i.imgur.com/6sFgq1n.png" alt="Screenshot of SnapTik.App with input box and download button" width="600">
<img src="https://i.imgur.com/8f0J7Qm.png" alt="Screenshot of SnapTik.App with download links" width="600">
<img src="https://i.imgur.com/5VvWb6O.png" alt="Screenshot of Video Downloader Plus icon and file selection" width="600">
<img src="https://i.imgur.com/2kxWc4a.png" alt="Screenshot of Save File option on Mac" width="600">
<p>As you can see, Video Downloader Plus is a handy extension that can help you download TikTok videos without watermark on Mac. It is free, easy, and reliable. You can use it anytime and anywhere to enjoy your TikTok videos without any limitations or restrictions.</p>
spaces/1phancelerku/anime-remove-background/Download WhatsApp Business APK Terbaru Aplikasi Gratis untuk Bisnis Kecil.md
DELETED
@@ -1,101 +0,0 @@
<h1>How to Download WhatsApp Business APK Terbaru</h1>
<p>If you are looking for a way to communicate with your customers more efficiently and grow your business, you might want to try WhatsApp Business. WhatsApp Business is a free app that allows you to create a business presence on WhatsApp, send and receive messages, share media, and manage your customer interactions. In this article, we will show you how to download WhatsApp Business APK Terbaru, which means the latest version of the app in Indonesian. We will also explain what WhatsApp Business is, what APK Terbaru is, how to set up and use the app, and some tips and tricks to make the most out of it.</p>
<h2>What is WhatsApp Business?</h2>
<p>WhatsApp Business is an app that was launched by Meta (formerly Facebook) in 2018. It is designed for small and medium-sized businesses that want to use WhatsApp as a platform to connect with their customers. WhatsApp Business has some features that are not available in WhatsApp Messenger, such as:</p>
<ul>
<li><b>BUSINESS PROFILE:</b> You can create a profile for your business that includes your website, location, contact information, hours of operation, catalog, and more.</li>
<li><b>BUSINESS MESSAGING TOOLS:</b> You can use automated messages to greet your customers, inform them when you are away, or send them quick replies. You can also use labels to organize your chats and contacts.</li>
<li><b>LANDLINE/FIXED NUMBER SUPPORT:</b> You can use WhatsApp Business with a landline or fixed phone number and receive verification codes via phone calls.</li>
<li><b>RUN BOTH WHATSAPP MESSENGER AND WHATSAPP BUSINESS:</b> You can have both apps installed on the same phone, but each app must have its own unique phone number.</li>
<li><b>WHATSAPP WEB:</b> You can access your WhatsApp Business account from your computer's browser and respond to your customers more efficiently.</li>
</ul>
<p>The benefits of using WhatsApp Business for your business are:</p>
<ul>
<li><b>EASY TO USE:</b> You can use the same interface and features that you are familiar with from WhatsApp Messenger.</li>
<li><b>COST-EFFECTIVE:</b> You can send and receive messages, calls, photos, videos, documents, and more for free*, as long as you have an internet connection.</li>
<li><b>SECURE:</b> You can enjoy end-to-end encryption for all your communications, which means that only you and your customers can read or listen to them.</li>
<li><b>POPULAR:</b> You can reach out to more than 2 billion users around the world who use WhatsApp every month.</li>
</ul>
<p>*Data charges may apply. Contact your provider for details.</p>
<h2>What is APK Terbaru?</h2>
<p>APK Terbaru is an Indonesian term that means "the latest APK". APK stands for Android Package Kit, which is a file format that contains all the elements needed to install an app on an Android device. Downloading APK Terbaru means that you can get the most updated version of the app, which may have new features, bug fixes, or performance improvements. However, downloading APK files from unknown sources can also pose some risks and challenges, such as:</p>
<ul>
<li><b>MALWARE:</b> You may download a file that contains malicious software that can harm your device or steal your data.</li>
<li><b>COMPATIBILITY:</b> You may download a file that is not compatible with your device or operating system, which can cause errors or crashes.</li>
<li><b>LEGALITY:</b> You may download a file that violates the terms and conditions of the app developer or the app store, which can result in legal consequences or account suspension.</li>
</ul>
<p>Therefore, before you download any APK file from unknown sources, you should take some precautions, such as:</p>
<ul>
<li><b>CHECK THE SOURCE:</b> You should only download APK files from reputable and trusted websites that have positive reviews and ratings from other users.</li>
<li><b>CHECK THE FILE:</b> You should scan the APK file with an antivirus or malware detector before you install it on your device, and compare its checksum against one published by the source if available (see the sketch after this list).</li>
<li><b>CHECK THE PERMISSIONS:</b> You should review the permissions that the APK file requests and only grant them if they are necessary and reasonable for the app's functionality.</li>
</ul>
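<p>As a concrete illustration of the "check the file" step, here is a minimal Python sketch that verifies a downloaded APK against a SHA-256 checksum. The expected value is a placeholder; this only helps if the download site actually publishes one:</p>

```python
import hashlib

def sha256_of(path: str) -> str:
    """Compute the SHA-256 digest of a file, reading it in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "<checksum published by the download site>"  # placeholder
if sha256_of("whatsapp-business.apk") != expected:
    raise SystemExit("Checksum mismatch - do not install this APK!")
```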
<h2>How to Download WhatsApp Business APK Terbaru from Google Play Store</h2>
<p>The easiest and safest way to download WhatsApp Business APK Terbaru is from Google Play Store, which is the official app store for Android devices. To do so, you need to follow these steps:</p>
<ol>
<li><b>OPEN GOOGLE PLAY STORE:</b> On your Android device, tap on the Google Play Store icon to launch the app.</li>
<li><b>SEARCH FOR WHATSAPP BUSINESS:</b> In the search bar at the top of the screen, type "WhatsApp Business" and tap on the magnifying glass icon to start the search.</li>
<li><b>FIND AND TAP ON THE APP:</b> From the list of results, find and tap on the app that has the name "WhatsApp Business" and the logo that has a green chat bubble with a white letter B inside it.</li>
<li><b>TAP ON INSTALL:</b> On the app page, tap on the green button that says "Install" to start downloading and installing the app on your device.</li>
<li><b>WAIT FOR THE PROCESS TO COMPLETE:</b> Depending on your internet speed and device storage, it may take a few minutes for the app to download and install. You can see the progress bar on the screen.</li>
<li><b>TAP ON OPEN:</b> Once the app is installed, you can tap on the green button that says "Open" to launch the app and start using it.</li>
</ol>
<p>Here is a screenshot of what the app page looks like on Google Play Store:</p>
<img src="https://play-lh.googleusercontent.com/0-0-4GSURc0nI5xVatU5TBRnRVLg5zGWCUTzUqf1NlTnJYAwLzT6hA3FIZjL9f8Ew=w720-h310-rw" alt="WhatsApp Business on Google Play Store" width="720" height="310">
<h2>How to Download WhatsApp Business APK Terbaru from Other Sources</h2>
<p>If you cannot access Google Play Store or you want to try other sources for downloading WhatsApp Business APK Terbaru, you can also use some alternative websites that offer APK files for free. Some of these websites are:</p>
<ul>
<li><a href="https://apkpure.com/whatsapp-business/com.whatsapp.w4b">APKPure</a></li>
<li><a href="https://apkmirror.com/apk/whatsapp-inc/whatsapp-business/">APKMirror</a></li>
<li><a href="https://www.apkmonk.com/app/com.whatsapp.w4b/">APKMonk</a></li>
</ul>
<p>To download WhatsApp Business APK Terbaru from these websites, you need to follow these steps:</p>
<ol>
<li><b>OPEN THE WEBSITE:</b> On your Android device's browser, go to the website of your choice from the list above.</li>
<li><b>FIND AND TAP ON THE APP:</b> On the website's homepage, find and tap on the app that has the name "WhatsApp Business" and the logo that has a green chat bubble with a white letter B inside it. You can also use the search function if you cannot find it easily.</li>
<li><b>TAP ON DOWNLOAD:</b> On the app page, tap on the download button to save the APK file to your device, then open the file to install the app.</li>
</ol>
<img src="https://play-lh.googleusercontent.com/0-0-4GSURc0nI5xVatU5TBRnRVLg5zGWCUTzUqf1NlTnJYAwLzT6hA3FIZjL9f8Ew=w720-h310-rw" alt="WhatsApp Business interface" width="720" height="310">
<h2>Conclusion</h2>
<p>WhatsApp Business is a great app for small and medium-sized businesses that want to communicate with their customers more effectively and grow their business. It has many features that can help you create a professional and personalized business presence on WhatsApp, send and receive messages, share media, and manage your customer interactions. To download WhatsApp Business APK Terbaru, you can use Google Play Store or other alternative sources, but you need to be careful and cautious when downloading APK files from unknown sources. You also need to set up and use the app properly for your business. We hope this article has helped you learn how to download WhatsApp Business APK Terbaru and how to use it for your business. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about WhatsApp Business APK Terbaru:</p>
<ol>
<li><b>Q: Is WhatsApp Business free?</b> <br> A: Yes, WhatsApp Business is free to download and use, as long as you have an internet connection. However, data charges may apply depending on your provider.</li>
<li><b>Q: Can I use WhatsApp Business and WhatsApp Messenger on the same phone?</b> <br> A: Yes, you can use both apps on the same phone, but each app must have its own unique phone number.</li>
<li><b>Q: How can I update WhatsApp Business APK Terbaru?</b> <br> A: You can update WhatsApp Business APK Terbaru by downloading and installing the latest version of the file from Google Play Store or other sources. You can also check for updates within the app by going to the menu icon at the top right corner of the screen and tapping on "Settings" > "Help" > "App info".</li>
<li><b>Q: How can I backup and restore my WhatsApp Business data?</b> <br> A: You can backup and restore your WhatsApp Business data by using Google Drive or a local backup. Go to the menu icon at the top right corner of the screen and tap on "Settings" > "Chats" > "Chat backup" to choose your backup options. You can also restore your data when you reinstall the app or switch to a new device.</li>
<li><b>Q: How can I contact WhatsApp Business support?</b> <br> A: You can contact WhatsApp Business support by going to the menu icon at the top right corner of the screen and tapping on "Settings" > "Help" > "Contact us". You can also visit their official website or follow them on social media for more information and updates.</li>
</ol>
spaces/2023Liu2023/bingo/src/components/chat-notification.tsx
DELETED
@@ -1,77 +0,0 @@
import { useEffect } from 'react'
import Image from 'next/image'

import IconWarning from '@/assets/images/warning.svg'
import { ChatError, ErrorCode, ChatMessageModel } from '@/lib/bots/bing/types'
import { ExternalLink } from './external-link'
import { useBing } from '@/lib/hooks/use-bing'

export interface ChatNotificationProps extends Pick<ReturnType<typeof useBing>, 'bot'> {
  message?: ChatMessageModel
}

// Map an error code to the notification content, resetting the conversation where needed.
function getAction(error: ChatError, reset: () => void) {
  if (error.code === ErrorCode.THROTTLE_LIMIT) {
    reset()
    return (
      <div>
        You have reached the daily message limit. Please <a href={`#dialog="settings"`}>switch accounts</a> or try again the next day.
      </div>
    )
  }
  if (error.code === ErrorCode.BING_FORBIDDEN) {
    return (
      <ExternalLink href="https://bing.com/new">
        Your account has been blacklisted. Try switching accounts or requesting an unban.
      </ExternalLink>
    )
  }
  if (error.code === ErrorCode.CONVERSATION_LIMIT) {
    return (
      <div>
        The current topic has ended. Click
        <a href={`#dialog="reset"`}>restart</a>
        to begin a new conversation.
      </div>
    )
  }
  if (error.code === ErrorCode.BING_CAPTCHA) {
    return (
      <ExternalLink href="https://www.bing.com/turing/captcha/challenge">
        Click to complete the CAPTCHA verification
      </ExternalLink>
    )
  }
  if (error.code === ErrorCode.BING_UNAUTHORIZED) {
    reset()
    return (
      <a href={`#dialog="settings"`}>No identity information was found, or it has expired. Click here to set it up again.</a>
    )
  }
  return error.message
}

export function ChatNotification({ message, bot }: ChatNotificationProps) {
  useEffect(() => {
    window.scrollBy(0, 2000)
  }, [message])

  // Return null (rather than undefined) so this stays a valid React component.
  if (!message?.error) return null

  return (
    <div
      className="notification-container"
    >
      <div className="bottom-notifications">
        <div className="inline-type with-decorative-line">
          <div className="text-container mt-1">
            <div className="title inline-flex items-start">
              <Image alt="error" src={IconWarning} width={20} className="mr-1 mt-1" />
              {getAction(message.error, () => bot.resetConversation())}
            </div>
          </div>
        </div>
      </div>
    </div>
  )
}
spaces/7hao/bingo/src/lib/storage.ts
DELETED
@@ -1,27 +0,0 @@
import { getMany, set, del, clear } from 'idb-keyval';

// Thin wrapper over idb-keyval exposing a chrome.storage-like API.
export const Storage = {
  // Accepts a single key or a list of keys; resolves to a key -> value record.
  async get(key: string | string[] | null): Promise<any> {
    if (key === null) return null;
    if (typeof key === 'string') {
      key = [key]
    }
    const returnData: Record<string, any> = {}
    const values = await getMany(key)
    key.forEach((k, idx) => {
      returnData[k] = values[idx]
    })
    return returnData;
  },
  // Persists every own property of `object` as a separate key.
  async set(object: any) {
    for (let key of Object.keys(object)) {
      await set(key, object[key])
    }
  },
  async remove(key: string) {
    return del(key);
  },
  async clear() {
    return clear();
  }
}
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/diff/diffusion.py
DELETED
@@ -1,334 +0,0 @@
import math
import random
from functools import partial
from inspect import isfunction
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm
from einops import rearrange

from modules.fastspeech.fs2 import FastSpeech2
from modules.diffsinger_midi.fs2 import FastSpeech2MIDI
from utils.hparams import hparams


def exists(x):
    return x is not None


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d


def cycle(dl):
    while True:
        for data in dl:
            yield data


def num_to_groups(num, divisor):
    groups = num // divisor
    remainder = num % divisor
    arr = [divisor] * groups
    if remainder > 0:
        arr.append(remainder)
    return arr


class Residual(nn.Module):
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, *args, **kwargs):
        return self.fn(x, *args, **kwargs) + x


class SinusoidalPosEmb(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        device = x.device
        half_dim = self.dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
        emb = x[:, None] * emb[None, :]
        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
        return emb


class Mish(nn.Module):
    def forward(self, x):
        return x * torch.tanh(F.softplus(x))


class Upsample(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.conv = nn.ConvTranspose2d(dim, dim, 4, 2, 1)

    def forward(self, x):
        return self.conv(x)


class Downsample(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.conv = nn.Conv2d(dim, dim, 3, 2, 1)

    def forward(self, x):
        return self.conv(x)


class Rezero(nn.Module):
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.g = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        return self.fn(x) * self.g


# building block modules

class Block(nn.Module):
    def __init__(self, dim, dim_out, groups=8):
        super().__init__()
        self.block = nn.Sequential(
            nn.Conv2d(dim, dim_out, 3, padding=1),
            nn.GroupNorm(groups, dim_out),
            Mish()
        )

    def forward(self, x):
        return self.block(x)


class ResnetBlock(nn.Module):
    def __init__(self, dim, dim_out, *, time_emb_dim, groups=8):
        super().__init__()
        self.mlp = nn.Sequential(
            Mish(),
            nn.Linear(time_emb_dim, dim_out)
        )

        self.block1 = Block(dim, dim_out)
        self.block2 = Block(dim_out, dim_out)
        self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb):
        h = self.block1(x)
        h += self.mlp(time_emb)[:, :, None, None]
        h = self.block2(h)
        return h + self.res_conv(x)


class LinearAttention(nn.Module):
    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(hidden_dim, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x)
        q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3)
        k = k.softmax(dim=-1)
        context = torch.einsum('bhdn,bhen->bhde', k, v)
        out = torch.einsum('bhde,bhdn->bhen', context, q)
        out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
        return self.to_out(out)


# gaussian diffusion trainer class

def extract(a, t, x_shape):
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))


def noise_like(shape, device, repeat=False):
    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
    noise = lambda: torch.randn(shape, device=device)
    return repeat_noise() if repeat else noise()


def cosine_beta_schedule(timesteps, s=0.008):
    """
    cosine schedule
    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
    """
    steps = timesteps + 1
    x = np.linspace(0, steps, steps)
    alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return np.clip(betas, a_min=0, a_max=0.999)


class GaussianDiffusion(nn.Module):
    def __init__(self, phone_encoder, out_dims, denoise_fn,
                 timesteps=1000, loss_type='l1', betas=None, spec_min=None, spec_max=None):
        super().__init__()
        self.denoise_fn = denoise_fn
        if hparams.get('use_midi') is not None and hparams['use_midi']:
            self.fs2 = FastSpeech2MIDI(phone_encoder, out_dims)
        else:
            self.fs2 = FastSpeech2(phone_encoder, out_dims)
        self.fs2.decoder = None
        self.mel_bins = out_dims

        if exists(betas):
            betas = betas.detach().cpu().numpy() if isinstance(betas, torch.Tensor) else betas
        else:
            betas = cosine_beta_schedule(timesteps)

        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.loss_type = loss_type

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']])
        self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']])

    def q_mean_variance(self, x_start, t):
        mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance

    def predict_start_from_noise(self, x_t, t, noise):
        return (
            extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
            extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )

    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
            extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
            extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, x, t, cond, clip_denoised: bool):
        noise_pred = self.denoise_fn(x, t, cond=cond)
        x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)

        if clip_denoised:
            x_recon.clamp_(-1., 1.)

        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False):
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond, clip_denoised=clip_denoised)
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    def q_sample(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (
            extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
            extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
        )

    def p_losses(self, x_start, t, cond, noise=None, nonpadding=None):
        noise = default(noise, lambda: torch.randn_like(x_start))

        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        x_recon = self.denoise_fn(x_noisy, t, cond)

        if self.loss_type == 'l1':
            if nonpadding is not None:
                loss = ((noise - x_recon).abs() * nonpadding.unsqueeze(1)).mean()
            else:
                # print('are you sure w/o nonpadding?')
                loss = (noise - x_recon).abs().mean()

        elif self.loss_type == 'l2':
            loss = F.mse_loss(noise, x_recon)
        else:
            raise NotImplementedError()

        return loss

    def forward(self, txt_tokens, mel2ph=None, spk_embed=None,
                ref_mels=None, f0=None, uv=None, energy=None, infer=False):
        b, *_, device = *txt_tokens.shape, txt_tokens.device
        ret = self.fs2(txt_tokens, mel2ph, spk_embed, ref_mels, f0, uv, energy,
                       skip_decoder=True, infer=infer)
        cond = ret['decoder_inp'].transpose(1, 2)
        if not infer:
            t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
            x = ref_mels
            x = self.norm_spec(x)
            x = x.transpose(1, 2)[:, None, :, :]  # [B, 1, M, T]
            nonpadding = (mel2ph != 0).float()
            ret['diff_loss'] = self.p_losses(x, t, cond, nonpadding=nonpadding)
        else:
            t = self.num_timesteps
            shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2])
            x = torch.randn(shape, device=device)
            for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
                x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
            x = x[:, 0].transpose(1, 2)
            ret['mel_out'] = self.denorm_spec(x)

        return ret

    def norm_spec(self, x):
        return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1

    def denorm_spec(self, x):
        return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min

    def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
        return self.fs2.cwt2f0_norm(cwt_spec, mean, std, mel2ph)

    def out2mel(self, x):
        return x
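For orientation, the forward (noising) side of the `GaussianDiffusion` module above can be exercised on its own. The following standalone sketch reproduces `cosine_beta_schedule` and the `q_sample` identity x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps, with tensor shapes chosen as illustrative placeholders:

```python
import numpy as np
import torch

# Reproduces the cosine schedule defined above, outside the repo's dependencies.
def cosine_beta_schedule(timesteps, s=0.008):
    steps = timesteps + 1
    x = np.linspace(0, steps, steps)
    alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return np.clip(betas, 0, 0.999)

betas = cosine_beta_schedule(1000)
alphas_cumprod = torch.tensor(np.cumprod(1.0 - betas), dtype=torch.float32)

# q_sample identity: x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps
x0 = torch.randn(2, 80, 100)   # placeholder "mel spectrogram" batch
t = torch.tensor([10, 500])    # one timestep per batch element
abar_t = alphas_cumprod[t].view(-1, 1, 1)
x_t = abar_t.sqrt() * x0 + (1 - abar_t).sqrt() * torch.randn_like(x0)
print(x_t.shape)  # torch.Size([2, 80, 100])
```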
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/Methods.js
DELETED
@@ -1,18 +0,0 @@
import ConfigurationMethods from './listpanel/ConfigurationMethods.js';
import OpenListPanel from './listpanel/OpenListPanel.js';
import CloseListPanel from './listpanel/CloseListPanel.js';
import ToggleListPanel from './listpanel/ToggleListPanel.js';

var Methods = {
    openListPanel: OpenListPanel,
    closeListPanel: CloseListPanel,
    toggleListPanel: ToggleListPanel,
}

Object.assign(
    Methods,
    ConfigurationMethods,
);

export default Methods;
spaces/AlekseyKorshuk/gai-project/modules/about.py
DELETED
@@ -1,17 +0,0 @@
import gradio as gr


def render_about():
    gr.Markdown(
        "# About\n"
        "In today's fast-paced world, many individuals feel increasingly isolated and crave meaningful connections. "
        "This project aims not just to produce a conversational model, but to address this societal issue by creating "
        "diverse conversational companions. Instead of building just one ideal model for all scenarios, the objective "
        "is to create a range of models suited to various conversation topics and environments. By mixing different "
        "models, we aspire to achieve a dynamic and engaging experience similar to the TikTok feed. Our core aim is "
        "to create a reusable pipeline for generating such datasets and ensuring they remain Safe For Work. Through "
        "this, we hope to offer users not just a chatbot, but a digital companion tailored to their emotional and "
        "conversational needs.\n\n"
        "[]"
        "(https://github.com/AlekseyKorshuk/gai-project)"
    )
spaces/Aloento/9Nine-VITS/transforms.py
DELETED
@@ -1,191 +0,0 @@
import numpy as np
import torch
from torch.nn import functional as F

DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3


def piecewise_rational_quadratic_transform(inputs,
                                           unnormalized_widths,
                                           unnormalized_heights,
                                           unnormalized_derivatives,
                                           inverse=False,
                                           tails=None,
                                           tail_bound=1.,
                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                           min_derivative=DEFAULT_MIN_DERIVATIVE):
    if tails is None:
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = {
            'tails': tails,
            'tail_bound': tail_bound
        }

    outputs, logabsdet = spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
    return outputs, logabsdet


def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(
        inputs[..., None] >= bin_locations,
        dim=-1
    ) - 1


def unconstrained_rational_quadratic_spline(inputs,
                                            unnormalized_widths,
                                            unnormalized_heights,
                                            unnormalized_derivatives,
                                            inverse=False,
                                            tails='linear',
                                            tail_bound=1.,
                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == 'linear':
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError('{} tails are not implemented.'.format(tails))

    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative
    )

    return outputs, logabsdet


def rational_quadratic_spline(inputs,
                              unnormalized_widths,
                              unnormalized_heights,
                              unnormalized_derivatives,
                              inverse=False,
                              left=0., right=1., bottom=0., top=1.,
                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                              min_derivative=DEFAULT_MIN_DERIVATIVE):
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError('Input to a transform is not within its domain')

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError('Minimal bin width too large for the number of bins')
    if min_bin_height * num_bins > 1.0:
        raise ValueError('Minimal bin height too large for the number of bins')

    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        a = (((inputs - input_cumheights) * (input_derivatives
                                             + input_derivatives_plus_one
                                             - 2 * input_delta)
              + input_heights * (input_delta - input_derivatives)))
        b = (input_heights * input_derivatives
             - (inputs - input_cumheights) * (input_derivatives
                                              + input_derivatives_plus_one
                                              - 2 * input_delta))
        c = - input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - root).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, -logabsdet
    else:
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (input_delta * theta.pow(2)
                                     + input_derivatives * theta_one_minus_theta)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - theta).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
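As a sanity check of the spline above, the following sketch (shapes are illustrative, and it assumes the module is importable as `transforms.py`) applies the forward transform with linear tails and verifies that the inverse round-trips:

```python
import torch
from transforms import piecewise_rational_quadratic_transform  # the module above

torch.manual_seed(0)
batch, dim, num_bins = 4, 10, 8
x = torch.rand(batch, dim) * 2 - 1          # inputs inside [-1, 1]
w = torch.randn(batch, dim, num_bins)       # unnormalized bin widths
h = torch.randn(batch, dim, num_bins)       # unnormalized bin heights
d = torch.randn(batch, dim, num_bins - 1)   # interior knot derivatives ('linear' tails)

y, logdet = piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails='linear', tail_bound=1.0)
x_rec, inv_logdet = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails='linear', tail_bound=1.0)

assert torch.allclose(x, x_rec, atol=1e-4)              # the spline round-trips
assert torch.allclose(logdet, -inv_logdet, atol=1e-4)   # log-determinants cancel
```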
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/loaders.md
DELETED
@@ -1,45 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Loaders

Adapters (textual inversion, LoRA, hypernetworks) allow you to modify a diffusion model to generate images in a specific style without training or finetuning the entire model. The adapter weights are typically only a tiny fraction of the pretrained model's, which makes them very portable. 🤗 Diffusers provides an easy-to-use `LoaderMixin` API to load adapter weights.

<Tip warning={true}>

🧪 The `LoaderMixins` are highly experimental and prone to future changes. To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `huggingface-cli login`.

</Tip>

## UNet2DConditionLoadersMixin

[[autodoc]] loaders.UNet2DConditionLoadersMixin

## TextualInversionLoaderMixin

[[autodoc]] loaders.TextualInversionLoaderMixin

## LoraLoaderMixin

[[autodoc]] loaders.LoraLoaderMixin

## FromSingleFileMixin

[[autodoc]] loaders.FromSingleFileMixin

## FromOriginalControlnetMixin

[[autodoc]] loaders.FromOriginalControlnetMixin

## FromOriginalVAEMixin

[[autodoc]] loaders.FromOriginalVAEMixin
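A short usage sketch of two of these mixins as exposed on a pipeline; the checkpoint ids are illustrative, and the LoRA path is a placeholder:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# LoraLoaderMixin: load LoRA adapter weights into the UNet (and text encoder).
pipe.load_lora_weights("path/to/lora")  # placeholder local path or Hub repo id

# TextualInversionLoaderMixin: register a learned token embedding.
pipe.load_textual_inversion("sd-concepts-library/cat-toy")
```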
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_dance_diffusion_to_diffusers.py
DELETED
@@ -1,339 +0,0 @@
#!/usr/bin/env python3
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for
    the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"


DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
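For reference, the script is driven entirely by its argparse block, e.g. `python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers` (the output directory is a placeholder; an official model name that is not a local file triggers a download via `MODELS_MAP`). A standalone sketch of the `get_crash_schedule` computation that `main()` feeds to `sampling.iplms_sample`:

```py
import math
import torch

# Uniform time grid, as built in main(), mapped through the "crash"
# reparametrization and back to timesteps via atan2.
steps = 100
t = torch.linspace(1, 0, steps + 1)[:-1]
sigma = torch.sin(t * math.pi / 2) ** 2              # noise scaling factor
alpha = (1 - sigma**2) ** 0.5                        # clean-signal scaling factor
step_list = torch.atan2(sigma, alpha) / math.pi * 2  # == get_crash_schedule(t)
```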
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
DELETED
@@ -1,713 +0,0 @@
import inspect
import warnings
from itertools import repeat
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from ...image_processor import VaeImageProcessor
from ...models import AutoencoderKL, UNet2DConditionModel
from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline
from . import SemanticStableDiffusionPipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SemanticStableDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion with latent editing.

    This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass
    documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular
    device, etc.).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`Q16SafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
            about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """

    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
    def decode_latents(self, latents):
        warnings.warn(
            "The decode_latents method is deprecated and will be removed in a future version. Please"
            " use VaeImageProcessor instead",
            FutureWarning,
        )
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: int = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        editing_prompt: Optional[Union[str, List[str]]] = None,
        editing_prompt_embeddings: Optional[torch.Tensor] = None,
        reverse_editing_direction: Optional[Union[bool, List[bool]]] = False,
        edit_guidance_scale: Optional[Union[float, List[float]]] = 5,
        edit_warmup_steps: Optional[Union[int, List[int]]] = 10,
        edit_cooldown_steps: Optional[Union[int, List[int]]] = None,
        edit_threshold: Optional[Union[float, List[float]]] = 0.9,
        edit_momentum_scale: Optional[float] = 0.1,
        edit_mom_beta: Optional[float] = 0.4,
        edit_weights: Optional[List[float]] = None,
        sem_guidance: Optional[List[torch.Tensor]] = None,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide image generation.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that is called every `callback_steps` steps during inference. The function is called with
                the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            editing_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to use for semantic guidance. Semantic guidance is disabled by setting
                `editing_prompt = None`. Guidance direction of prompt should be specified via
                `reverse_editing_direction`.
            editing_prompt_embeddings (`torch.Tensor`, *optional*):
                Pre-computed embeddings to use for semantic guidance. Guidance direction of embedding should be
                specified via `reverse_editing_direction`.
            reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`):
                Whether the corresponding prompt in `editing_prompt` should be increased or decreased.
            edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5):
                Guidance scale for semantic guidance. If provided as a list, values should correspond to
                `editing_prompt`.
            edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10):
                Number of diffusion steps (for each prompt) for which semantic guidance is not applied. Momentum is
                calculated for those steps and applied once all warmup periods are over.
            edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`):
                Number of diffusion steps (for each prompt) after which semantic guidance is no longer applied.
            edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9):
                Threshold of semantic guidance.
            edit_momentum_scale (`float`, *optional*, defaults to 0.1):
                Scale of the momentum to be added to the semantic guidance at each diffusion step. If set to 0.0,
                momentum is disabled. Momentum is already built up during warmup (for diffusion steps smaller than
                `sld_warmup_steps`). Momentum is only added to latent guidance once all warmup periods are finished.
            edit_mom_beta (`float`, *optional*, defaults to 0.4):
                Defines how semantic guidance momentum builds up. `edit_mom_beta` indicates how much of the previous
                momentum is kept. Momentum is already built up during warmup (for diffusion steps smaller than
                `edit_warmup_steps`).
            edit_weights (`List[float]`, *optional*, defaults to `None`):
                Indicates how much each individual concept should influence the overall guidance. If no weights are
                provided all concepts are applied equally.
            sem_guidance (`List[torch.Tensor]`, *optional*):
                List of pre-generated guidance vectors to be applied at generation. Length of the list has to
                correspond to `num_inference_steps`.

        Examples:

        ```py
        >>> import torch
        >>> from diffusers import SemanticStableDiffusionPipeline

        >>> pipe = SemanticStableDiffusionPipeline.from_pretrained(
        ...     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> out = pipe(
        ...     prompt="a photo of the face of a woman",
        ...     num_images_per_prompt=1,
        ...     guidance_scale=7,
        ...     editing_prompt=[
        ...         "smiling, smile",  # Concepts to apply
        ...         "glasses, wearing glasses",
        ...         "curls, wavy hair, curly hair",
        ...         "beard, full beard, mustache",
        ...     ],
        ...     reverse_editing_direction=[
        ...         False,
        ...         False,
        ...         False,
        ...         False,
        ...     ],  # Direction of guidance i.e. increase all concepts
        ...     edit_warmup_steps=[10, 10, 10, 10],  # Warmup period for each concept
        ...     edit_guidance_scale=[4, 5, 5, 5.4],  # Guidance scale for each concept
        ...     edit_threshold=[
        ...         0.99,
        ...         0.975,
        ...         0.925,
        ...         0.96,
        ...     ],  # Threshold for each concept. Threshold equals the percentile of the latent space that will be discarded. I.e. threshold=0.99 uses 1% of the latent dimensions
        ...     edit_momentum_scale=0.3,  # Momentum scale that will be added to the latent guidance
        ...     edit_mom_beta=0.6,  # Momentum beta
        ...     edit_weights=[1, 1, 1, 1, 1],  # Weights of the individual concepts against each other
        ... )
        >>> image = out.images[0]
        ```

        Returns:
            [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`,
                [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] is returned, otherwise a
                `tuple` is returned where the first element is a list with the generated images and the second element
                is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work"
                (nsfw) content.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)

        if editing_prompt:
            enable_edit_guidance = True
            if isinstance(editing_prompt, str):
                editing_prompt = [editing_prompt]
            enabled_editing_prompts = len(editing_prompt)
        elif editing_prompt_embeddings is not None:
            enable_edit_guidance = True
            enabled_editing_prompts = editing_prompt_embeddings.shape[0]
        else:
            enabled_editing_prompts = 0
            enable_edit_guidance = False

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if enable_edit_guidance:
            # get safety text embeddings
            if editing_prompt_embeddings is None:
                edit_concepts_input = self.tokenizer(
                    [x for item in editing_prompt for x in repeat(item, batch_size)],
                    padding="max_length",
                    max_length=self.tokenizer.model_max_length,
                    return_tensors="pt",
                )

                edit_concepts_input_ids = edit_concepts_input.input_ids

                if edit_concepts_input_ids.shape[-1] > self.tokenizer.model_max_length:
                    removed_text = self.tokenizer.batch_decode(
                        edit_concepts_input_ids[:, self.tokenizer.model_max_length :]
                    )
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                    )
                    edit_concepts_input_ids = edit_concepts_input_ids[:, : self.tokenizer.model_max_length]
                edit_concepts = self.text_encoder(edit_concepts_input_ids.to(self.device))[0]
            else:
                edit_concepts = editing_prompt_embeddings.to(self.device).repeat(batch_size, 1, 1)

            # duplicate text embeddings for each generation per prompt, using mps friendly method
            bs_embed_edit, seq_len_edit, _ = edit_concepts.shape
            edit_concepts = edit_concepts.repeat(1, num_images_per_prompt, 1)
            edit_concepts = edit_concepts.view(bs_embed_edit * num_images_per_prompt, seq_len_edit, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            if enable_edit_guidance:
                text_embeddings = torch.cat([uncond_embeddings, text_embeddings, edit_concepts])
            else:
                text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            text_embeddings.dtype,
            self.device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # Initialize edit_momentum to None
        edit_momentum = None

        self.uncond_estimates = None
        self.text_estimates = None
        self.edit_estimates = None
        self.sem_guidance = None

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = (
                torch.cat([latents] * (2 + enabled_editing_prompts)) if do_classifier_free_guidance else latents
            )
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_out = noise_pred.chunk(2 + enabled_editing_prompts)  # [b,4, 64, 64]
                noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]
                noise_pred_edit_concepts = noise_pred_out[2:]

                # default text guidance
                noise_guidance = guidance_scale * (noise_pred_text - noise_pred_uncond)
                # noise_guidance = (noise_pred_text - noise_pred_edit_concepts[0])

                if self.uncond_estimates is None:
                    self.uncond_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_uncond.shape))
                self.uncond_estimates[i] = noise_pred_uncond.detach().cpu()

                if self.text_estimates is None:
                    self.text_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape))
                self.text_estimates[i] = noise_pred_text.detach().cpu()

                if self.edit_estimates is None and enable_edit_guidance:
                    self.edit_estimates = torch.zeros(
                        (num_inference_steps + 1, len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape)
                    )

                if self.sem_guidance is None:
                    self.sem_guidance = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape))

                if edit_momentum is None:
                    edit_momentum = torch.zeros_like(noise_guidance)

                if enable_edit_guidance:
                    concept_weights = torch.zeros(
                        (len(noise_pred_edit_concepts), noise_guidance.shape[0]),
                        device=self.device,
                        dtype=noise_guidance.dtype,
                    )
                    noise_guidance_edit = torch.zeros(
                        (len(noise_pred_edit_concepts), *noise_guidance.shape),
                        device=self.device,
                        dtype=noise_guidance.dtype,
                    )
                    # noise_guidance_edit = torch.zeros_like(noise_guidance)
                    warmup_inds = []
                    for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts):
                        self.edit_estimates[i, c] = noise_pred_edit_concept
                        if isinstance(edit_guidance_scale, list):
                            edit_guidance_scale_c = edit_guidance_scale[c]
                        else:
                            edit_guidance_scale_c = edit_guidance_scale

                        if isinstance(edit_threshold, list):
                            edit_threshold_c = edit_threshold[c]
                        else:
                            edit_threshold_c = edit_threshold
                        if isinstance(reverse_editing_direction, list):
                            reverse_editing_direction_c = reverse_editing_direction[c]
                        else:
                            reverse_editing_direction_c = reverse_editing_direction
                        if edit_weights:
                            edit_weight_c = edit_weights[c]
                        else:
                            edit_weight_c = 1.0
                        if isinstance(edit_warmup_steps, list):
                            edit_warmup_steps_c = edit_warmup_steps[c]
                        else:
                            edit_warmup_steps_c = edit_warmup_steps

                        if isinstance(edit_cooldown_steps, list):
                            edit_cooldown_steps_c = edit_cooldown_steps[c]
                        elif edit_cooldown_steps is None:
                            edit_cooldown_steps_c = i + 1
                        else:
                            edit_cooldown_steps_c = edit_cooldown_steps
                        if i >= edit_warmup_steps_c:
                            warmup_inds.append(c)
                        if i >= edit_cooldown_steps_c:
                            noise_guidance_edit[c, :, :, :, :] = torch.zeros_like(noise_pred_edit_concept)
                            continue

                        noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond
                        # tmp_weights = (noise_pred_text - noise_pred_edit_concept).sum(dim=(1, 2, 3))
                        tmp_weights = (noise_guidance - noise_pred_edit_concept).sum(dim=(1, 2, 3))

                        tmp_weights = torch.full_like(tmp_weights, edit_weight_c)  # * (1 / enabled_editing_prompts)
                        if reverse_editing_direction_c:
                            noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1
                        concept_weights[c, :] = tmp_weights

                        noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c

                        # torch.quantile function expects float32
                        if noise_guidance_edit_tmp.dtype == torch.float32:
                            tmp = torch.quantile(
                                torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2),
                                edit_threshold_c,
                                dim=2,
                                keepdim=False,
                            )
                        else:
                            tmp = torch.quantile(
                                torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2).to(torch.float32),
                                edit_threshold_c,
                                dim=2,
                                keepdim=False,
                            ).to(noise_guidance_edit_tmp.dtype)

                        noise_guidance_edit_tmp = torch.where(
                            torch.abs(noise_guidance_edit_tmp) >= tmp[:, :, None, None],
                            noise_guidance_edit_tmp,
                            torch.zeros_like(noise_guidance_edit_tmp),
                        )
                        noise_guidance_edit[c, :, :, :, :] = noise_guidance_edit_tmp

                        # noise_guidance_edit = noise_guidance_edit + noise_guidance_edit_tmp

                    warmup_inds = torch.tensor(warmup_inds).to(self.device)
                    if len(noise_pred_edit_concepts) > warmup_inds.shape[0] > 0:
                        concept_weights = concept_weights.to("cpu")  # Offload to cpu
                        noise_guidance_edit = noise_guidance_edit.to("cpu")

                        concept_weights_tmp = torch.index_select(concept_weights.to(self.device), 0, warmup_inds)
                        concept_weights_tmp = torch.where(
                            concept_weights_tmp < 0, torch.zeros_like(concept_weights_tmp), concept_weights_tmp
                        )
                        concept_weights_tmp = concept_weights_tmp / concept_weights_tmp.sum(dim=0)
                        # concept_weights_tmp = torch.nan_to_num(concept_weights_tmp)

                        noise_guidance_edit_tmp = torch.index_select(
                            noise_guidance_edit.to(self.device), 0, warmup_inds
                        )
                        noise_guidance_edit_tmp = torch.einsum(
                            "cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp
                        )
                        noise_guidance_edit_tmp = noise_guidance_edit_tmp
                        noise_guidance = noise_guidance + noise_guidance_edit_tmp

                        self.sem_guidance[i] = noise_guidance_edit_tmp.detach().cpu()

                        del noise_guidance_edit_tmp
                        del concept_weights_tmp
                    concept_weights = concept_weights.to(self.device)
                    noise_guidance_edit = noise_guidance_edit.to(self.device)

                    concept_weights = torch.where(
                        concept_weights < 0, torch.zeros_like(concept_weights), concept_weights
                    )

                    concept_weights = torch.nan_to_num(concept_weights)

                    noise_guidance_edit = torch.einsum("cb,cbijk->bijk", concept_weights, noise_guidance_edit)

                    noise_guidance_edit = noise_guidance_edit + edit_momentum_scale * edit_momentum

                    edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * noise_guidance_edit

                    if warmup_inds.shape[0] == len(noise_pred_edit_concepts):
                        noise_guidance = noise_guidance + noise_guidance_edit
                        self.sem_guidance[i] = noise_guidance_edit.detach().cpu()

                if sem_guidance is not None:
                    edit_guidance = sem_guidance[i].to(self.device)
                    noise_guidance = noise_guidance + edit_guidance

                noise_pred = noise_pred_uncond + noise_guidance

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # 8. Post-processing
        if not output_type == "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
            image, has_nsfw_concept = self.run_safety_checker(image, self.device, text_embeddings.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        if not return_dict:
            return (image, has_nsfw_concept)

        return SemanticStableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
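To make the per-concept edit logic above easier to follow, here is a standalone sketch (toy shapes, not the pipeline itself) of the two key operations the loop performs for each concept: quantile thresholding of the edit guidance and the momentum update:

```py
import torch

# Toy stand-in for one concept's guidance at one step: (batch, channels, H, W).
guidance = torch.randn(2, 4, 64, 64)
edit_threshold_c, edit_momentum_scale, edit_mom_beta = 0.9, 0.1, 0.4
edit_momentum = torch.zeros_like(guidance)

# Zero out every element whose magnitude falls below the per-sample,
# per-channel quantile of |guidance|; only the strongest 10% survive.
tmp = torch.quantile(guidance.abs().flatten(start_dim=2), edit_threshold_c, dim=2)
masked = torch.where(guidance.abs() >= tmp[:, :, None, None], guidance, torch.zeros_like(guidance))

# Add the accumulated momentum, then update it as an exponential moving average.
guided = masked + edit_momentum_scale * edit_momentum
edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * guided
```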
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
DELETED
@@ -1,594 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
    logging,
)
from diffusers.utils import load_numpy, nightly, slow, torch_device
from diffusers.utils.testing_utils import CaptureLogger, enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusion2PipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            # SD2-specific config below
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            # SD2-specific config below
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5753, 0.6113, 0.5005, 0.5036, 0.5464, 0.4725, 0.4982, 0.4865, 0.4861])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5121, 0.5714, 0.4827, 0.5057, 0.5646, 0.4766, 0.5189, 0.4895, 0.4990])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_k_lms(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config)
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_k_euler_ancestral(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler.from_config(components["scheduler"].config)
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4864, 0.5440, 0.4842, 0.4994, 0.5543, 0.4846, 0.5196, 0.4942, 0.5063])
|
193 |
-
|
194 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
195 |
-
|
196 |
-
def test_stable_diffusion_k_euler(self):
|
197 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
198 |
-
components = self.get_dummy_components()
|
199 |
-
components["scheduler"] = EulerDiscreteScheduler.from_config(components["scheduler"].config)
|
200 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
201 |
-
sd_pipe = sd_pipe.to(device)
|
202 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
203 |
-
|
204 |
-
inputs = self.get_dummy_inputs(device)
|
205 |
-
image = sd_pipe(**inputs).images
|
206 |
-
image_slice = image[0, -3:, -3:, -1]
|
207 |
-
|
208 |
-
assert image.shape == (1, 64, 64, 3)
|
209 |
-
expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061])
|
210 |
-
|
211 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
212 |
-
|
213 |
-
def test_stable_diffusion_unflawed(self):
|
214 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
215 |
-
components = self.get_dummy_components()
|
216 |
-
components["scheduler"] = DDIMScheduler.from_config(
|
217 |
-
components["scheduler"].config, timestep_spacing="trailing"
|
218 |
-
)
|
219 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
220 |
-
sd_pipe = sd_pipe.to(device)
|
221 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
222 |
-
|
223 |
-
inputs = self.get_dummy_inputs(device)
|
224 |
-
inputs["guidance_rescale"] = 0.7
|
225 |
-
inputs["num_inference_steps"] = 10
|
226 |
-
image = sd_pipe(**inputs).images
|
227 |
-
image_slice = image[0, -3:, -3:, -1]
|
228 |
-
|
229 |
-
assert image.shape == (1, 64, 64, 3)
|
230 |
-
expected_slice = np.array([0.4736, 0.5405, 0.4705, 0.4955, 0.5675, 0.4812, 0.5310, 0.4967, 0.5064])
|
231 |
-
|
232 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
233 |
-
|
234 |
-
def test_stable_diffusion_long_prompt(self):
|
235 |
-
components = self.get_dummy_components()
|
236 |
-
components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config)
|
237 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
238 |
-
sd_pipe = sd_pipe.to(torch_device)
|
239 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
240 |
-
|
241 |
-
do_classifier_free_guidance = True
|
242 |
-
negative_prompt = None
|
243 |
-
num_images_per_prompt = 1
|
244 |
-
logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion")
|
245 |
-
|
246 |
-
prompt = 25 * "@"
|
247 |
-
with CaptureLogger(logger) as cap_logger_3:
|
248 |
-
text_embeddings_3 = sd_pipe._encode_prompt(
|
249 |
-
prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
|
250 |
-
)
|
251 |
-
|
252 |
-
prompt = 100 * "@"
|
253 |
-
with CaptureLogger(logger) as cap_logger:
|
254 |
-
text_embeddings = sd_pipe._encode_prompt(
|
255 |
-
prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
|
256 |
-
)
|
257 |
-
|
258 |
-
negative_prompt = "Hello"
|
259 |
-
with CaptureLogger(logger) as cap_logger_2:
|
260 |
-
text_embeddings_2 = sd_pipe._encode_prompt(
|
261 |
-
prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
|
262 |
-
)
|
263 |
-
|
264 |
-
assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape
|
265 |
-
assert text_embeddings.shape[1] == 77
|
266 |
-
|
267 |
-
assert cap_logger.out == cap_logger_2.out
|
268 |
-
# 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25
|
269 |
-
assert cap_logger.out.count("@") == 25
|
270 |
-
assert cap_logger_3.out == ""
|
271 |
-
|
272 |
-
def test_attention_slicing_forward_pass(self):
|
273 |
-
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
|
274 |
-
|
275 |
-
def test_inference_batch_single_identical(self):
|
276 |
-
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
|
277 |
-
|
278 |
-
|
279 |
-
@slow
|
280 |
-
@require_torch_gpu
|
281 |
-
class StableDiffusion2PipelineSlowTests(unittest.TestCase):
|
282 |
-
def tearDown(self):
|
283 |
-
super().tearDown()
|
284 |
-
gc.collect()
|
285 |
-
torch.cuda.empty_cache()
|
286 |
-
|
287 |
-
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
|
288 |
-
generator = torch.Generator(device=generator_device).manual_seed(seed)
|
289 |
-
latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
|
290 |
-
latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
|
291 |
-
inputs = {
|
292 |
-
"prompt": "a photograph of an astronaut riding a horse",
|
293 |
-
"latents": latents,
|
294 |
-
"generator": generator,
|
295 |
-
"num_inference_steps": 3,
|
296 |
-
"guidance_scale": 7.5,
|
297 |
-
"output_type": "numpy",
|
298 |
-
}
|
299 |
-
return inputs
|
300 |
-
|
301 |
-
def test_stable_diffusion_default_ddim(self):
|
302 |
-
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
|
303 |
-
pipe.to(torch_device)
|
304 |
-
pipe.set_progress_bar_config(disable=None)
|
305 |
-
|
306 |
-
inputs = self.get_inputs(torch_device)
|
307 |
-
image = pipe(**inputs).images
|
308 |
-
image_slice = image[0, -3:, -3:, -1].flatten()
|
309 |
-
|
310 |
-
assert image.shape == (1, 512, 512, 3)
|
311 |
-
expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
|
312 |
-
assert np.abs(image_slice - expected_slice).max() < 7e-3
|
313 |
-
|
314 |
-
def test_stable_diffusion_pndm(self):
|
315 |
-
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
|
316 |
-
pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)
|
317 |
-
pipe.to(torch_device)
|
318 |
-
pipe.set_progress_bar_config(disable=None)
|
319 |
-
|
320 |
-
inputs = self.get_inputs(torch_device)
|
321 |
-
image = pipe(**inputs).images
|
322 |
-
image_slice = image[0, -3:, -3:, -1].flatten()
|
323 |
-
|
324 |
-
assert image.shape == (1, 512, 512, 3)
|
325 |
-
expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
|
326 |
-
assert np.abs(image_slice - expected_slice).max() < 7e-3
|
327 |
-
|
328 |
-
def test_stable_diffusion_k_lms(self):
|
329 |
-
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
|
330 |
-
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
|
331 |
-
pipe.to(torch_device)
|
332 |
-
pipe.set_progress_bar_config(disable=None)
|
333 |
-
|
334 |
-
inputs = self.get_inputs(torch_device)
|
335 |
-
image = pipe(**inputs).images
|
336 |
-
image_slice = image[0, -3:, -3:, -1].flatten()
|
337 |
-
|
338 |
-
assert image.shape == (1, 512, 512, 3)
|
339 |
-
expected_slice = np.array([0.10440, 0.13115, 0.11100, 0.10141, 0.11440, 0.07215, 0.11332, 0.09693, 0.10006])
|
340 |
-
assert np.abs(image_slice - expected_slice).max() < 3e-3
|
341 |
-
|
342 |
-
def test_stable_diffusion_attention_slicing(self):
|
343 |
-
torch.cuda.reset_peak_memory_stats()
|
344 |
-
pipe = StableDiffusionPipeline.from_pretrained(
|
345 |
-
"stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
|
346 |
-
)
|
347 |
-
pipe = pipe.to(torch_device)
|
348 |
-
pipe.set_progress_bar_config(disable=None)
|
349 |
-
|
350 |
-
# enable attention slicing
|
351 |
-
pipe.enable_attention_slicing()
|
352 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
353 |
-
image_sliced = pipe(**inputs).images
|
354 |
-
|
355 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
356 |
-
torch.cuda.reset_peak_memory_stats()
|
357 |
-
# make sure that less than 3.3 GB is allocated
|
358 |
-
assert mem_bytes < 3.3 * 10**9
|
359 |
-
|
360 |
-
# disable slicing
|
361 |
-
pipe.disable_attention_slicing()
|
362 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
363 |
-
image = pipe(**inputs).images
|
364 |
-
|
365 |
-
# make sure that more than 3.3 GB is allocated
|
366 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
367 |
-
assert mem_bytes > 3.3 * 10**9
|
368 |
-
assert np.abs(image_sliced - image).max() < 1e-3
|
369 |
-
|
370 |
-
def test_stable_diffusion_text2img_intermediate_state(self):
|
371 |
-
number_of_steps = 0
|
372 |
-
|
373 |
-
def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
|
374 |
-
callback_fn.has_been_called = True
|
375 |
-
nonlocal number_of_steps
|
376 |
-
number_of_steps += 1
|
377 |
-
if step == 1:
|
378 |
-
latents = latents.detach().cpu().numpy()
|
379 |
-
assert latents.shape == (1, 4, 64, 64)
|
380 |
-
latents_slice = latents[0, -3:, -3:, -1]
|
381 |
-
expected_slice = np.array(
|
382 |
-
[-0.3862, -0.4507, -1.1729, 0.0686, -1.1045, 0.7124, -1.8301, 0.1903, 1.2773]
|
383 |
-
)
|
384 |
-
|
385 |
-
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
|
386 |
-
elif step == 2:
|
387 |
-
latents = latents.detach().cpu().numpy()
|
388 |
-
assert latents.shape == (1, 4, 64, 64)
|
389 |
-
latents_slice = latents[0, -3:, -3:, -1]
|
390 |
-
expected_slice = np.array(
|
391 |
-
[0.2720, -0.1863, -0.7383, -0.5029, -0.7534, 0.3970, -0.7646, 0.4468, 1.2686]
|
392 |
-
)
|
393 |
-
|
394 |
-
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
|
395 |
-
|
396 |
-
callback_fn.has_been_called = False
|
397 |
-
|
398 |
-
pipe = StableDiffusionPipeline.from_pretrained(
|
399 |
-
"stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
|
400 |
-
)
|
401 |
-
pipe = pipe.to(torch_device)
|
402 |
-
pipe.set_progress_bar_config(disable=None)
|
403 |
-
pipe.enable_attention_slicing()
|
404 |
-
|
405 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
406 |
-
pipe(**inputs, callback=callback_fn, callback_steps=1)
|
407 |
-
assert callback_fn.has_been_called
|
408 |
-
assert number_of_steps == inputs["num_inference_steps"]
|
409 |
-
|
410 |
-
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
|
411 |
-
torch.cuda.empty_cache()
|
412 |
-
torch.cuda.reset_max_memory_allocated()
|
413 |
-
torch.cuda.reset_peak_memory_stats()
|
414 |
-
|
415 |
-
pipe = StableDiffusionPipeline.from_pretrained(
|
416 |
-
"stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
|
417 |
-
)
|
418 |
-
pipe = pipe.to(torch_device)
|
419 |
-
pipe.set_progress_bar_config(disable=None)
|
420 |
-
pipe.enable_attention_slicing(1)
|
421 |
-
pipe.enable_sequential_cpu_offload()
|
422 |
-
|
423 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
424 |
-
_ = pipe(**inputs)
|
425 |
-
|
426 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
427 |
-
# make sure that less than 2.8 GB is allocated
|
428 |
-
assert mem_bytes < 2.8 * 10**9
|
429 |
-
|
430 |
-
def test_stable_diffusion_pipeline_with_model_offloading(self):
|
431 |
-
torch.cuda.empty_cache()
|
432 |
-
torch.cuda.reset_max_memory_allocated()
|
433 |
-
torch.cuda.reset_peak_memory_stats()
|
434 |
-
|
435 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
436 |
-
|
437 |
-
# Normal inference
|
438 |
-
|
439 |
-
pipe = StableDiffusionPipeline.from_pretrained(
|
440 |
-
"stabilityai/stable-diffusion-2-base",
|
441 |
-
torch_dtype=torch.float16,
|
442 |
-
)
|
443 |
-
pipe.unet.set_default_attn_processor()
|
444 |
-
pipe.to(torch_device)
|
445 |
-
pipe.set_progress_bar_config(disable=None)
|
446 |
-
outputs = pipe(**inputs)
|
447 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
448 |
-
|
449 |
-
# With model offloading
|
450 |
-
|
451 |
-
# Reload but don't move to cuda
|
452 |
-
pipe = StableDiffusionPipeline.from_pretrained(
|
453 |
-
"stabilityai/stable-diffusion-2-base",
|
454 |
-
torch_dtype=torch.float16,
|
455 |
-
)
|
456 |
-
pipe.unet.set_default_attn_processor()
|
457 |
-
|
458 |
-
torch.cuda.empty_cache()
|
459 |
-
torch.cuda.reset_max_memory_allocated()
|
460 |
-
torch.cuda.reset_peak_memory_stats()
|
461 |
-
|
462 |
-
pipe.enable_model_cpu_offload()
|
463 |
-
pipe.set_progress_bar_config(disable=None)
|
464 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
465 |
-
outputs_offloaded = pipe(**inputs)
|
466 |
-
mem_bytes_offloaded = torch.cuda.max_memory_allocated()
|
467 |
-
|
468 |
-
assert np.abs(outputs.images - outputs_offloaded.images).max() < 1e-3
|
469 |
-
assert mem_bytes_offloaded < mem_bytes
|
470 |
-
assert mem_bytes_offloaded < 3 * 10**9
|
471 |
-
for module in pipe.text_encoder, pipe.unet, pipe.vae:
|
472 |
-
assert module.device == torch.device("cpu")
|
473 |
-
|
474 |
-
# With attention slicing
|
475 |
-
torch.cuda.empty_cache()
|
476 |
-
torch.cuda.reset_max_memory_allocated()
|
477 |
-
torch.cuda.reset_peak_memory_stats()
|
478 |
-
|
479 |
-
pipe.enable_attention_slicing()
|
480 |
-
_ = pipe(**inputs)
|
481 |
-
mem_bytes_slicing = torch.cuda.max_memory_allocated()
|
482 |
-
assert mem_bytes_slicing < mem_bytes_offloaded
|
483 |
-
|
484 |
-
|
485 |
-
@nightly
|
486 |
-
@require_torch_gpu
|
487 |
-
class StableDiffusion2PipelineNightlyTests(unittest.TestCase):
|
488 |
-
def tearDown(self):
|
489 |
-
super().tearDown()
|
490 |
-
gc.collect()
|
491 |
-
torch.cuda.empty_cache()
|
492 |
-
|
493 |
-
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
|
494 |
-
generator = torch.Generator(device=generator_device).manual_seed(seed)
|
495 |
-
latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
|
496 |
-
latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
|
497 |
-
inputs = {
|
498 |
-
"prompt": "a photograph of an astronaut riding a horse",
|
499 |
-
"latents": latents,
|
500 |
-
"generator": generator,
|
501 |
-
"num_inference_steps": 50,
|
502 |
-
"guidance_scale": 7.5,
|
503 |
-
"output_type": "numpy",
|
504 |
-
}
|
505 |
-
return inputs
|
506 |
-
|
507 |
-
def test_stable_diffusion_2_0_default_ddim(self):
|
508 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base").to(torch_device)
|
509 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
510 |
-
|
511 |
-
inputs = self.get_inputs(torch_device)
|
512 |
-
image = sd_pipe(**inputs).images[0]
|
513 |
-
|
514 |
-
expected_image = load_numpy(
|
515 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
516 |
-
"/stable_diffusion_2_text2img/stable_diffusion_2_0_base_ddim.npy"
|
517 |
-
)
|
518 |
-
max_diff = np.abs(expected_image - image).max()
|
519 |
-
assert max_diff < 1e-3
|
520 |
-
|
521 |
-
def test_stable_diffusion_2_1_default_pndm(self):
|
522 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
|
523 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
524 |
-
|
525 |
-
inputs = self.get_inputs(torch_device)
|
526 |
-
image = sd_pipe(**inputs).images[0]
|
527 |
-
|
528 |
-
expected_image = load_numpy(
|
529 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
530 |
-
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_pndm.npy"
|
531 |
-
)
|
532 |
-
max_diff = np.abs(expected_image - image).max()
|
533 |
-
assert max_diff < 1e-3
|
534 |
-
|
535 |
-
def test_stable_diffusion_ddim(self):
|
536 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
|
537 |
-
sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
|
538 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
539 |
-
|
540 |
-
inputs = self.get_inputs(torch_device)
|
541 |
-
image = sd_pipe(**inputs).images[0]
|
542 |
-
|
543 |
-
expected_image = load_numpy(
|
544 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
545 |
-
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_ddim.npy"
|
546 |
-
)
|
547 |
-
max_diff = np.abs(expected_image - image).max()
|
548 |
-
assert max_diff < 1e-3
|
549 |
-
|
550 |
-
def test_stable_diffusion_lms(self):
|
551 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
|
552 |
-
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
|
553 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
554 |
-
|
555 |
-
inputs = self.get_inputs(torch_device)
|
556 |
-
image = sd_pipe(**inputs).images[0]
|
557 |
-
|
558 |
-
expected_image = load_numpy(
|
559 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
560 |
-
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_lms.npy"
|
561 |
-
)
|
562 |
-
max_diff = np.abs(expected_image - image).max()
|
563 |
-
assert max_diff < 1e-3
|
564 |
-
|
565 |
-
def test_stable_diffusion_euler(self):
|
566 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
|
567 |
-
sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config)
|
568 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
569 |
-
|
570 |
-
inputs = self.get_inputs(torch_device)
|
571 |
-
image = sd_pipe(**inputs).images[0]
|
572 |
-
|
573 |
-
expected_image = load_numpy(
|
574 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
575 |
-
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_euler.npy"
|
576 |
-
)
|
577 |
-
max_diff = np.abs(expected_image - image).max()
|
578 |
-
assert max_diff < 1e-3
|
579 |
-
|
580 |
-
def test_stable_diffusion_dpm(self):
|
581 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
|
582 |
-
sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
|
583 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
584 |
-
|
585 |
-
inputs = self.get_inputs(torch_device)
|
586 |
-
inputs["num_inference_steps"] = 25
|
587 |
-
image = sd_pipe(**inputs).images[0]
|
588 |
-
|
589 |
-
expected_image = load_numpy(
|
590 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
591 |
-
"/stable_diffusion_2_text2img/stable_diffusion_2_1_base_dpm_multi.npy"
|
592 |
-
)
|
593 |
-
max_diff = np.abs(expected_image - image).max()
|
594 |
-
assert max_diff < 1e-3
|
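
As context for the fast tests above: the same tiny-component pattern can be reproduced outside the test harness. A minimal standalone sketch (not part of the deleted file), assuming a diffusers/transformers install of the era these tests target and network access to the hf-internal-testing/tiny-random-clip tokenizer:

# Minimal sketch: build the tiny SD2-style pipeline from scratch and run it.
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, StableDiffusionPipeline, UNet2DConditionModel

torch.manual_seed(0)
unet = UNet2DConditionModel(
    block_out_channels=(32, 64), layers_per_block=2, sample_size=32,
    in_channels=4, out_channels=4,
    down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
    up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
    cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
)
vae = AutoencoderKL(
    block_out_channels=[32, 64], in_channels=3, out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4, sample_size=128,
)
text_encoder = CLIPTextModel(CLIPTextConfig(
    bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37,
    layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
    pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
))
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012,
                          beta_schedule="scaled_linear", clip_sample=False,
                          set_alpha_to_one=False)

pipe = StableDiffusionPipeline(
    unet=unet, scheduler=scheduler, vae=vae, text_encoder=text_encoder,
    tokenizer=tokenizer, safety_checker=None, feature_extractor=None,
)
image = pipe("A painting of a squirrel eating a burger",
             num_inference_steps=2, guidance_scale=6.0,
             generator=torch.Generator("cpu").manual_seed(0),
             output_type="numpy").images
print(image.shape)

With 32-pixel latents and a 2x VAE scale factor (two VAE blocks), the pipeline emits 64x64 images, which is why the assertions above check for shape (1, 64, 64, 3).
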
spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py
DELETED
@@ -1,37 +0,0 @@
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe'))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
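
This config only overrides the `_base_` Faster R-CNN file: it swaps in Detectron2's caffe-style ResNet-50 weights and the matching BGR, mean-only normalization. A hedged sketch of how mmdet 2.x typically consumes such a file (assumes mmcv and mmdet 2.x are installed; the path is illustrative):

from mmcv import Config
from mmdet.models import build_detector

# _base_ inheritance is resolved at load time, so cfg.model already contains
# the merged Faster R-CNN definition with the caffe-style backbone settings.
cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py')
model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
print(type(model).__name__)  # expected: FasterRCNN
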
spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py
DELETED
@@ -1,58 +0,0 @@
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnext101_32x8d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        style='pytorch'))

dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[57.375, 57.120, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
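
Unlike the caffe Faster R-CNN config above, the `img_norm_cfg` here divides by per-channel std as well as subtracting the mean, while still keeping BGR channel order (`to_rgb=False`). A quick numpy sketch of what the `Normalize` step computes (image values are random placeholders):

import numpy as np

mean = np.array([103.530, 116.280, 123.675], dtype=np.float32)
std = np.array([57.375, 57.120, 58.395], dtype=np.float32)

img_bgr = np.random.randint(0, 256, (800, 1333, 3)).astype(np.float32)  # H x W x C, BGR
normalized = (img_bgr - mean) / std  # broadcast over the channel axis
print(normalized.mean(axis=(0, 1)))  # roughly centered near zero
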
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
DELETED
@@ -1,91 +0,0 @@
import torch.nn as nn
from mmcv.cnn import ConvModule, Linear, constant_init, xavier_init
from mmcv.runner import auto_fp16

from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead


@HEADS.register_module()
class CoarseMaskHead(FCNMaskHead):
    """Coarse mask head used in PointRend.

    Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsample it.

    Args:
        num_convs (int): Number of conv layers in the head. Default: 0.
        num_fcs (int): Number of fc layers in the head. Default: 2.
        fc_out_channels (int): Number of output channels of fc layer.
            Default: 1024.
        downsample_factor (int): The factor that feature map is downsampled by.
            Default: 2.
    """

    def __init__(self,
                 num_convs=0,
                 num_fcs=2,
                 fc_out_channels=1024,
                 downsample_factor=2,
                 *arg,
                 **kwarg):
        super(CoarseMaskHead, self).__init__(
            *arg, num_convs=num_convs, upsample_cfg=dict(type=None), **kwarg)
        self.num_fcs = num_fcs
        assert self.num_fcs > 0
        self.fc_out_channels = fc_out_channels
        self.downsample_factor = downsample_factor
        assert self.downsample_factor >= 1
        # remove conv_logit
        delattr(self, 'conv_logits')

        if downsample_factor > 1:
            downsample_in_channels = (
                self.conv_out_channels
                if self.num_convs > 0 else self.in_channels)
            self.downsample_conv = ConvModule(
                downsample_in_channels,
                self.conv_out_channels,
                kernel_size=downsample_factor,
                stride=downsample_factor,
                padding=0,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
        else:
            self.downsample_conv = None

        self.output_size = (self.roi_feat_size[0] // downsample_factor,
                            self.roi_feat_size[1] // downsample_factor)
        self.output_area = self.output_size[0] * self.output_size[1]

        last_layer_dim = self.conv_out_channels * self.output_area

        self.fcs = nn.ModuleList()
        for i in range(num_fcs):
            fc_in_channels = (
                last_layer_dim if i == 0 else self.fc_out_channels)
            self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        output_channels = self.num_classes * self.output_area
        self.fc_logits = Linear(last_layer_dim, output_channels)

    def init_weights(self):
        for m in self.fcs.modules():
            if isinstance(m, nn.Linear):
                xavier_init(m)
        constant_init(self.fc_logits, 0.001)

    @auto_fp16()
    def forward(self, x):
        for conv in self.convs:
            x = conv(x)

        if self.downsample_conv is not None:
            x = self.downsample_conv(x)

        x = x.flatten(1)
        for fc in self.fcs:
            x = self.relu(fc(x))
        mask_pred = self.fc_logits(x).view(
            x.size(0), self.num_classes, *self.output_size)
        return mask_pred
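
The head above replaces `FCNMaskHead`'s upsampling with a strided-conv downsample followed by fully connected layers. A self-contained PyTorch shape sketch of that forward pass (plain torch only; the 14x14 RoI size, 256 channels, and 80 classes are illustrative assumptions, not values from this file):

import torch
import torch.nn as nn

roi_feat_size, downsample_factor, num_classes = 14, 2, 80  # assumed sizes
in_channels, conv_out_channels, fc_out_channels = 256, 256, 1024
output_size = roi_feat_size // downsample_factor  # 7

# kernel_size == stride == downsample_factor halves the spatial resolution
downsample = nn.Conv2d(in_channels, conv_out_channels,
                       kernel_size=downsample_factor, stride=downsample_factor)
fc1 = nn.Linear(conv_out_channels * output_size * output_size, fc_out_channels)
fc2 = nn.Linear(fc_out_channels, fc_out_channels)
fc_logits = nn.Linear(fc_out_channels, num_classes * output_size * output_size)
relu = nn.ReLU(inplace=True)

x = torch.randn(8, in_channels, roi_feat_size, roi_feat_size)  # 8 RoIs
x = downsample(x)                 # -> (8, 256, 7, 7)
x = x.flatten(1)                  # -> (8, 256 * 7 * 7)
x = relu(fc2(relu(fc1(x))))       # two fc layers, ReLU after each
mask_pred = fc_logits(x).view(8, num_classes, output_size, output_size)
print(mask_pred.shape)            # torch.Size([8, 80, 7, 7])
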
spaces/Andy1621/uniformer_image_detection/tools/dataset_converters/pascal_voc.py
DELETED
@@ -1,236 +0,0 @@
import argparse
import os.path as osp
import xml.etree.ElementTree as ET

import mmcv
import numpy as np

from mmdet.core import voc_classes

label_ids = {name: i for i, name in enumerate(voc_classes())}


def parse_xml(args):
    xml_path, img_path = args
    tree = ET.parse(xml_path)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    bboxes = []
    labels = []
    bboxes_ignore = []
    labels_ignore = []
    for obj in root.findall('object'):
        name = obj.find('name').text
        label = label_ids[name]
        difficult = int(obj.find('difficult').text)
        bnd_box = obj.find('bndbox')
        bbox = [
            int(bnd_box.find('xmin').text),
            int(bnd_box.find('ymin').text),
            int(bnd_box.find('xmax').text),
            int(bnd_box.find('ymax').text)
        ]
        if difficult:
            bboxes_ignore.append(bbox)
            labels_ignore.append(label)
        else:
            bboxes.append(bbox)
            labels.append(label)
    if not bboxes:
        bboxes = np.zeros((0, 4))
        labels = np.zeros((0, ))
    else:
        bboxes = np.array(bboxes, ndmin=2) - 1
        labels = np.array(labels)
    if not bboxes_ignore:
        bboxes_ignore = np.zeros((0, 4))
        labels_ignore = np.zeros((0, ))
    else:
        bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
        labels_ignore = np.array(labels_ignore)
    annotation = {
        'filename': img_path,
        'width': w,
        'height': h,
        'ann': {
            'bboxes': bboxes.astype(np.float32),
            'labels': labels.astype(np.int64),
            'bboxes_ignore': bboxes_ignore.astype(np.float32),
            'labels_ignore': labels_ignore.astype(np.int64)
        }
    }
    return annotation


def cvt_annotations(devkit_path, years, split, out_file):
    if not isinstance(years, list):
        years = [years]
    annotations = []
    for year in years:
        filelist = osp.join(devkit_path,
                            f'VOC{year}/ImageSets/Main/{split}.txt')
        if not osp.isfile(filelist):
            print(f'filelist does not exist: {filelist}, '
                  f'skip voc{year} {split}')
            return
        img_names = mmcv.list_from_file(filelist)
        xml_paths = [
            osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml')
            for img_name in img_names
        ]
        img_paths = [
            f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names
        ]
        part_annotations = mmcv.track_progress(parse_xml,
                                               list(zip(xml_paths, img_paths)))
        annotations.extend(part_annotations)
    if out_file.endswith('json'):
        annotations = cvt_to_coco_json(annotations)
    mmcv.dump(annotations, out_file)
    return annotations


def cvt_to_coco_json(annotations):
    image_id = 0
    annotation_id = 0
    coco = dict()
    coco['images'] = []
    coco['type'] = 'instance'
    coco['categories'] = []
    coco['annotations'] = []
    image_set = set()

    def addAnnItem(annotation_id, image_id, category_id, bbox, difficult_flag):
        annotation_item = dict()
        annotation_item['segmentation'] = []

        seg = []
        # bbox[] is x1,y1,x2,y2
        # left_top
        seg.append(int(bbox[0]))
        seg.append(int(bbox[1]))
        # left_bottom
        seg.append(int(bbox[0]))
        seg.append(int(bbox[3]))
        # right_bottom
        seg.append(int(bbox[2]))
        seg.append(int(bbox[3]))
        # right_top
        seg.append(int(bbox[2]))
        seg.append(int(bbox[1]))

        annotation_item['segmentation'].append(seg)

        xywh = np.array(
            [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]])
        annotation_item['area'] = int(xywh[2] * xywh[3])
        if difficult_flag == 1:
            annotation_item['ignore'] = 0
            annotation_item['iscrowd'] = 1
        else:
            annotation_item['ignore'] = 0
            annotation_item['iscrowd'] = 0
        annotation_item['image_id'] = int(image_id)
        annotation_item['bbox'] = xywh.astype(int).tolist()
        annotation_item['category_id'] = int(category_id)
        annotation_item['id'] = int(annotation_id)
        coco['annotations'].append(annotation_item)
        return annotation_id + 1

    for category_id, name in enumerate(voc_classes()):
        category_item = dict()
        category_item['supercategory'] = str('none')
        category_item['id'] = int(category_id)
        category_item['name'] = str(name)
        coco['categories'].append(category_item)

    for ann_dict in annotations:
        file_name = ann_dict['filename']
        ann = ann_dict['ann']
        assert file_name not in image_set
        image_item = dict()
        image_item['id'] = int(image_id)
        image_item['file_name'] = str(file_name)
        image_item['height'] = int(ann_dict['height'])
        image_item['width'] = int(ann_dict['width'])
        coco['images'].append(image_item)
        image_set.add(file_name)

        bboxes = ann['bboxes'][:, :4]
        labels = ann['labels']
        for bbox_id in range(len(bboxes)):
            bbox = bboxes[bbox_id]
            label = labels[bbox_id]
            annotation_id = addAnnItem(
                annotation_id, image_id, label, bbox, difficult_flag=0)

        bboxes_ignore = ann['bboxes_ignore'][:, :4]
        labels_ignore = ann['labels_ignore']
        for bbox_id in range(len(bboxes_ignore)):
            bbox = bboxes_ignore[bbox_id]
            label = labels_ignore[bbox_id]
            annotation_id = addAnnItem(
                annotation_id, image_id, label, bbox, difficult_flag=1)

        image_id += 1

    return coco


def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert PASCAL VOC annotations to mmdetection format')
    parser.add_argument('devkit_path', help='pascal voc devkit path')
    parser.add_argument('-o', '--out-dir', help='output path')
    parser.add_argument(
        '--out-format',
        default='pkl',
        choices=('pkl', 'coco'),
        help='output format, "coco" indicates coco annotation format')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    devkit_path = args.devkit_path
    out_dir = args.out_dir if args.out_dir else devkit_path
    mmcv.mkdir_or_exist(out_dir)

    years = []
    if osp.isdir(osp.join(devkit_path, 'VOC2007')):
        years.append('2007')
    if osp.isdir(osp.join(devkit_path, 'VOC2012')):
        years.append('2012')
    if '2007' in years and '2012' in years:
        years.append(['2007', '2012'])
    if not years:
        raise IOError(f'The devkit path {devkit_path} contains neither '
                      '"VOC2007" nor "VOC2012" subfolder')
    out_fmt = f'.{args.out_format}'
    if args.out_format == 'coco':
        out_fmt = '.json'
    for year in years:
        if year == '2007':
            prefix = 'voc07'
        elif year == '2012':
            prefix = 'voc12'
        elif year == ['2007', '2012']:
            prefix = 'voc0712'
        for split in ['train', 'val', 'trainval']:
            dataset_name = prefix + '_' + split
            print(f'processing {dataset_name} ...')
            cvt_annotations(devkit_path, year, split,
                            osp.join(out_dir, dataset_name + out_fmt))
        if not isinstance(year, list):
            dataset_name = prefix + '_test'
            print(f'processing {dataset_name} ...')
            cvt_annotations(devkit_path, year, 'test',
                            osp.join(out_dir, dataset_name + out_fmt))
    print('Done!')


if __name__ == '__main__':
    main()
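
The key conversion in `addAnnItem` is from VOC corner boxes to COCO-style boxes plus a rectangle "segmentation" polygon. A small standalone sketch of that step, with illustrative numbers:

bbox = [48, 240, 195, 371]  # VOC-style x1, y1, x2, y2 (illustrative values)

x1, y1, x2, y2 = bbox
xywh = [x1, y1, x2 - x1, y2 - y1]   # COCO-style [x, y, w, h]
area = xywh[2] * xywh[3]
# corner order matching the script: top-left, bottom-left, bottom-right, top-right
segmentation = [[x1, y1, x1, y2, x2, y2, x2, y1]]
print(xywh, area)  # [48, 240, 147, 131] 19257
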
spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py
DELETED
@@ -1,6 +0,0 @@
_base_ = [
    '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
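
Configs like this one carry almost no content of their own: everything comes from the `_base_` files, with only the ADE20K class count overlaid. A sketch of the merge as seen from mmcv (assumes mmcv is installed; the path is illustrative); the same pattern applies to the nonlocal and OCRNet configs below:

from mmcv import Config

cfg = Config.fromfile('configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py')
print(cfg.model.decode_head.num_classes)     # 150 (ADE20K classes)
print(cfg.model.auxiliary_head.num_classes)  # 150
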
spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './nonlocal_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
_base_ = [
    '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
spaces/AnimeStudio/anime-models/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Maximum Multiplier
emoji: 🛕🛕
colorFrom: green
colorTo: blue
sdk: gradio
sdk_version: 3.15.0
app_file: app.py
pinned: true
duplicated_from: blueorigin6/stablediffusion-models
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/lr_updater.py
DELETED
@@ -1,670 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
import numbers
|
3 |
-
from math import cos, pi
|
4 |
-
|
5 |
-
import annotator.uniformer.mmcv as mmcv
|
6 |
-
from .hook import HOOKS, Hook
|
7 |
-
|
8 |
-
|
9 |
-
class LrUpdaterHook(Hook):
|
10 |
-
"""LR Scheduler in MMCV.
|
11 |
-
|
12 |
-
Args:
|
13 |
-
by_epoch (bool): LR changes epoch by epoch
|
14 |
-
warmup (string): Type of warmup used. It can be None(use no warmup),
|
15 |
-
'constant', 'linear' or 'exp'
|
16 |
-
warmup_iters (int): The number of iterations or epochs that warmup
|
17 |
-
lasts
|
18 |
-
warmup_ratio (float): LR used at the beginning of warmup equals to
|
19 |
-
warmup_ratio * initial_lr
|
20 |
-
warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters
|
21 |
-
means the number of epochs that warmup lasts, otherwise means the
|
22 |
-
number of iteration that warmup lasts
|
23 |
-
"""
|
24 |
-
|
25 |
-
def __init__(self,
|
26 |
-
by_epoch=True,
|
27 |
-
warmup=None,
|
28 |
-
warmup_iters=0,
|
29 |
-
warmup_ratio=0.1,
|
30 |
-
warmup_by_epoch=False):
|
31 |
-
# validate the "warmup" argument
|
32 |
-
if warmup is not None:
|
33 |
-
if warmup not in ['constant', 'linear', 'exp']:
|
34 |
-
raise ValueError(
|
35 |
-
f'"{warmup}" is not a supported type for warming up, valid'
|
36 |
-
' types are "constant" and "linear"')
|
37 |
-
if warmup is not None:
|
38 |
-
assert warmup_iters > 0, \
|
39 |
-
'"warmup_iters" must be a positive integer'
|
40 |
-
assert 0 < warmup_ratio <= 1.0, \
|
41 |
-
'"warmup_ratio" must be in range (0,1]'
|
42 |
-
|
43 |
-
self.by_epoch = by_epoch
|
44 |
-
self.warmup = warmup
|
45 |
-
self.warmup_iters = warmup_iters
|
46 |
-
self.warmup_ratio = warmup_ratio
|
47 |
-
self.warmup_by_epoch = warmup_by_epoch
|
48 |
-
|
49 |
-
if self.warmup_by_epoch:
|
50 |
-
self.warmup_epochs = self.warmup_iters
|
51 |
-
self.warmup_iters = None
|
52 |
-
else:
|
53 |
-
self.warmup_epochs = None
|
54 |
-
|
55 |
-
self.base_lr = [] # initial lr for all param groups
|
56 |
-
self.regular_lr = [] # expected lr if no warming up is performed
|
57 |
-
|
58 |
-
def _set_lr(self, runner, lr_groups):
|
59 |
-
if isinstance(runner.optimizer, dict):
|
60 |
-
for k, optim in runner.optimizer.items():
|
61 |
-
for param_group, lr in zip(optim.param_groups, lr_groups[k]):
|
62 |
-
param_group['lr'] = lr
|
63 |
-
else:
|
64 |
-
for param_group, lr in zip(runner.optimizer.param_groups,
|
65 |
-
lr_groups):
|
66 |
-
param_group['lr'] = lr
|
67 |
-
|
68 |
-
def get_lr(self, runner, base_lr):
|
69 |
-
raise NotImplementedError
|
70 |
-
|
71 |
-
def get_regular_lr(self, runner):
|
72 |
-
if isinstance(runner.optimizer, dict):
|
73 |
-
lr_groups = {}
|
74 |
-
for k in runner.optimizer.keys():
|
75 |
-
_lr_group = [
|
76 |
-
self.get_lr(runner, _base_lr)
|
77 |
-
for _base_lr in self.base_lr[k]
|
78 |
-
]
|
79 |
-
lr_groups.update({k: _lr_group})
|
80 |
-
|
81 |
-
return lr_groups
|
82 |
-
else:
|
83 |
-
return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]
|
84 |
-
|
85 |
-
def get_warmup_lr(self, cur_iters):
|
86 |
-
|
87 |
-
def _get_warmup_lr(cur_iters, regular_lr):
|
88 |
-
if self.warmup == 'constant':
|
89 |
-
warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr]
|
90 |
-
elif self.warmup == 'linear':
|
91 |
-
k = (1 - cur_iters / self.warmup_iters) * (1 -
|
92 |
-
self.warmup_ratio)
|
93 |
-
warmup_lr = [_lr * (1 - k) for _lr in regular_lr]
|
94 |
-
elif self.warmup == 'exp':
|
95 |
-
k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
|
96 |
-
warmup_lr = [_lr * k for _lr in regular_lr]
|
97 |
-
return warmup_lr
|
98 |
-
|
99 |
-
if isinstance(self.regular_lr, dict):
|
100 |
-
lr_groups = {}
|
101 |
-
for key, regular_lr in self.regular_lr.items():
|
102 |
-
lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr)
|
103 |
-
return lr_groups
|
104 |
-
else:
|
105 |
-
return _get_warmup_lr(cur_iters, self.regular_lr)
|
106 |
-
|
107 |
-
def before_run(self, runner):
|
108 |
-
# NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
|
109 |
-
# it will be set according to the optimizer params
|
110 |
-
if isinstance(runner.optimizer, dict):
|
111 |
-
self.base_lr = {}
|
112 |
-
for k, optim in runner.optimizer.items():
|
113 |
-
for group in optim.param_groups:
|
114 |
-
group.setdefault('initial_lr', group['lr'])
|
115 |
-
_base_lr = [
|
116 |
-
group['initial_lr'] for group in optim.param_groups
|
117 |
-
]
|
118 |
-
self.base_lr.update({k: _base_lr})
|
119 |
-
else:
|
120 |
-
for group in runner.optimizer.param_groups:
|
121 |
-
group.setdefault('initial_lr', group['lr'])
|
122 |
-
self.base_lr = [
|
123 |
-
group['initial_lr'] for group in runner.optimizer.param_groups
|
124 |
-
]
|
125 |
-
|
126 |
-
def before_train_epoch(self, runner):
|
127 |
-
if self.warmup_iters is None:
|
128 |
-
epoch_len = len(runner.data_loader)
|
129 |
-
self.warmup_iters = self.warmup_epochs * epoch_len
|
130 |
-
|
131 |
-
if not self.by_epoch:
|
132 |
-
return
|
133 |
-
|
134 |
-
self.regular_lr = self.get_regular_lr(runner)
|
135 |
-
self._set_lr(runner, self.regular_lr)
|
136 |
-
|
137 |
-
def before_train_iter(self, runner):
|
138 |
-
cur_iter = runner.iter
|
139 |
-
if not self.by_epoch:
|
140 |
-
self.regular_lr = self.get_regular_lr(runner)
|
141 |
-
if self.warmup is None or cur_iter >= self.warmup_iters:
|
142 |
-
self._set_lr(runner, self.regular_lr)
|
143 |
-
else:
|
144 |
-
warmup_lr = self.get_warmup_lr(cur_iter)
|
145 |
-
self._set_lr(runner, warmup_lr)
|
146 |
-
elif self.by_epoch:
|
147 |
-
if self.warmup is None or cur_iter > self.warmup_iters:
|
148 |
-
return
|
149 |
-
elif cur_iter == self.warmup_iters:
|
150 |
-
self._set_lr(runner, self.regular_lr)
|
151 |
-
else:
|
152 |
-
warmup_lr = self.get_warmup_lr(cur_iter)
|
153 |
-
self._set_lr(runner, warmup_lr)
|
154 |
-
|
155 |
-
|
156 |
-
@HOOKS.register_module()
|
157 |
-
class FixedLrUpdaterHook(LrUpdaterHook):
|
158 |
-
|
159 |
-
def __init__(self, **kwargs):
|
160 |
-
super(FixedLrUpdaterHook, self).__init__(**kwargs)
|
161 |
-
|
162 |
-
def get_lr(self, runner, base_lr):
|
163 |
-
return base_lr
|
164 |
-
|
165 |
-
|
166 |
-
@HOOKS.register_module()
|
167 |
-
class StepLrUpdaterHook(LrUpdaterHook):
|
168 |
-
"""Step LR scheduler with min_lr clipping.
|
169 |
-
|
170 |
-
Args:
|
171 |
-
step (int | list[int]): Step to decay the LR. If an int value is given,
|
172 |
-
regard it as the decay interval. If a list is given, decay LR at
|
173 |
-
these steps.
|
174 |
-
gamma (float, optional): Decay LR ratio. Default: 0.1.
|
175 |
-
min_lr (float, optional): Minimum LR value to keep. If LR after decay
|
176 |
-
is lower than `min_lr`, it will be clipped to this value. If None
|
177 |
-
is given, we don't perform lr clipping. Default: None.
|
178 |
-
"""
|
179 |
-
|
180 |
-
def __init__(self, step, gamma=0.1, min_lr=None, **kwargs):
|
181 |
-
if isinstance(step, list):
|
182 |
-
assert mmcv.is_list_of(step, int)
|
183 |
-
assert all([s > 0 for s in step])
|
184 |
-
elif isinstance(step, int):
|
185 |
-
assert step > 0
|
186 |
-
else:
|
187 |
-
raise TypeError('"step" must be a list or integer')
|
188 |
-
self.step = step
|
189 |
-
self.gamma = gamma
|
190 |
-
self.min_lr = min_lr
|
191 |
-
super(StepLrUpdaterHook, self).__init__(**kwargs)
|
192 |
-
|
193 |
-
def get_lr(self, runner, base_lr):
|
194 |
-
progress = runner.epoch if self.by_epoch else runner.iter
|
195 |
-
|
196 |
-
# calculate exponential term
|
197 |
-
if isinstance(self.step, int):
|
198 |
-
exp = progress // self.step
|
199 |
-
else:
|
200 |
-
exp = len(self.step)
|
201 |
-
for i, s in enumerate(self.step):
|
202 |
-
if progress < s:
|
203 |
-
exp = i
|
204 |
-
break
|
205 |
-
|
206 |
-
lr = base_lr * (self.gamma**exp)
|
207 |
-
if self.min_lr is not None:
|
208 |
-
# clip to a minimum value
|
209 |
-
lr = max(lr, self.min_lr)
|
210 |
-
return lr
|
211 |
-
|
212 |
-
|
213 |
-
@HOOKS.register_module()
|
214 |
-
class ExpLrUpdaterHook(LrUpdaterHook):
|
215 |
-
|
216 |
-
def __init__(self, gamma, **kwargs):
|
217 |
-
self.gamma = gamma
|
218 |
-
super(ExpLrUpdaterHook, self).__init__(**kwargs)
|
219 |
-
|
220 |
-
def get_lr(self, runner, base_lr):
|
221 |
-
progress = runner.epoch if self.by_epoch else runner.iter
|
222 |
-
return base_lr * self.gamma**progress
|
223 |
-
|
224 |
-
|
225 |
-
@HOOKS.register_module()
|
226 |
-
class PolyLrUpdaterHook(LrUpdaterHook):
|
227 |
-
|
228 |
-
def __init__(self, power=1., min_lr=0., **kwargs):
|
229 |
-
self.power = power
|
230 |
-
self.min_lr = min_lr
|
231 |
-
super(PolyLrUpdaterHook, self).__init__(**kwargs)
|
232 |
-
|
233 |
-
def get_lr(self, runner, base_lr):
|
234 |
-
if self.by_epoch:
|
235 |
-
progress = runner.epoch
|
236 |
-
max_progress = runner.max_epochs
|
237 |
-
else:
|
238 |
-
progress = runner.iter
|
239 |
-
max_progress = runner.max_iters
|
240 |
-
coeff = (1 - progress / max_progress)**self.power
|
241 |
-
return (base_lr - self.min_lr) * coeff + self.min_lr
|
242 |
-
|
243 |
-
|
244 |
-
@HOOKS.register_module()
|
245 |
-
class InvLrUpdaterHook(LrUpdaterHook):
|
246 |
-
|
247 |
-
def __init__(self, gamma, power=1., **kwargs):
|
248 |
-
self.gamma = gamma
|
249 |
-
self.power = power
|
250 |
-
super(InvLrUpdaterHook, self).__init__(**kwargs)
|
251 |
-
|
252 |
-
def get_lr(self, runner, base_lr):
|
253 |
-
progress = runner.epoch if self.by_epoch else runner.iter
|
254 |
-
return base_lr * (1 + self.gamma * progress)**(-self.power)
|
255 |
-
|
256 |
-
|
257 |
-
@HOOKS.register_module()
|
258 |
-
class CosineAnnealingLrUpdaterHook(LrUpdaterHook):
|
259 |
-
|
260 |
-
def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs):
|
261 |
-
assert (min_lr is None) ^ (min_lr_ratio is None)
|
262 |
-
self.min_lr = min_lr
|
263 |
-
self.min_lr_ratio = min_lr_ratio
|
264 |
-
super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs)
|
265 |
-
|
266 |
-
def get_lr(self, runner, base_lr):
|
267 |
-
if self.by_epoch:
|
268 |
-
progress = runner.epoch
|
269 |
-
max_progress = runner.max_epochs
|
270 |
-
else:
|
271 |
-
progress = runner.iter
|
272 |
-
max_progress = runner.max_iters
|
273 |
-
|
274 |
-
if self.min_lr_ratio is not None:
|
275 |
-
target_lr = base_lr * self.min_lr_ratio
|
276 |
-
else:
|
277 |
-
target_lr = self.min_lr
|
278 |
-
return annealing_cos(base_lr, target_lr, progress / max_progress)
|
279 |
-
|
280 |
-
|
281 |
-
@HOOKS.register_module()
|
282 |
-
class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook):
|
283 |
-
"""Flat + Cosine lr schedule.
|
284 |
-
|
285 |
-
Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501
|
286 |
-
|
287 |
-
Args:
|
288 |
-
start_percent (float): When to start annealing the learning rate
|
289 |
-
after the percentage of the total training steps.
|
290 |
-
The value should be in range [0, 1).
|
291 |
-
Default: 0.75
|
292 |
-
min_lr (float, optional): The minimum lr. Default: None.
|
293 |
-
min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
|
294 |
-
Either `min_lr` or `min_lr_ratio` should be specified.
|
295 |
-
Default: None.
|
296 |
-
"""
|
297 |
-
|
298 |
-
def __init__(self,
|
299 |
-
start_percent=0.75,
|
300 |
-
min_lr=None,
|
301 |
-
min_lr_ratio=None,
|
302 |
-
**kwargs):
|
303 |
-
assert (min_lr is None) ^ (min_lr_ratio is None)
|
304 |
-
if start_percent < 0 or start_percent > 1 or not isinstance(
|
305 |
-
start_percent, float):
|
306 |
-
raise ValueError(
|
307 |
-
'expected float between 0 and 1 start_percent, but '
|
308 |
-
f'got {start_percent}')
|
309 |
-
self.start_percent = start_percent
|
310 |
-
self.min_lr = min_lr
|
311 |
-
self.min_lr_ratio = min_lr_ratio
|
312 |
-
super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs)
|
313 |
-
|
314 |
-
def get_lr(self, runner, base_lr):
|
315 |
-
if self.by_epoch:
|
316 |
-
start = round(runner.max_epochs * self.start_percent)
|
317 |
-
progress = runner.epoch - start
|
318 |
-
max_progress = runner.max_epochs - start
|
319 |
-
else:
|
320 |
-
start = round(runner.max_iters * self.start_percent)
|
321 |
-
progress = runner.iter - start
|
322 |
-
max_progress = runner.max_iters - start
|
323 |
-
|
324 |
-
if self.min_lr_ratio is not None:
|
325 |
-
target_lr = base_lr * self.min_lr_ratio
|
326 |
-
else:
|
327 |
-
target_lr = self.min_lr
|
328 |
-
|
329 |
-
if progress < 0:
|
330 |
-
return base_lr
|
331 |
-
else:
|
332 |
-
return annealing_cos(base_lr, target_lr, progress / max_progress)
|
333 |
-
|
334 |
-
|
-@HOOKS.register_module()
-class CosineRestartLrUpdaterHook(LrUpdaterHook):
-    """Cosine annealing with restarts learning rate scheme.
-
-    Args:
-        periods (list[int]): Periods for each cosine annealing cycle.
-        restart_weights (list[float], optional): Restart weights at each
-            restart iteration. Default: [1].
-        min_lr (float, optional): The minimum lr. Default: None.
-        min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
-            Either `min_lr` or `min_lr_ratio` should be specified.
-            Default: None.
-    """
-
-    def __init__(self,
-                 periods,
-                 restart_weights=[1],
-                 min_lr=None,
-                 min_lr_ratio=None,
-                 **kwargs):
-        assert (min_lr is None) ^ (min_lr_ratio is None)
-        self.periods = periods
-        self.min_lr = min_lr
-        self.min_lr_ratio = min_lr_ratio
-        self.restart_weights = restart_weights
-        assert (len(self.periods) == len(self.restart_weights)
-                ), 'periods and restart_weights should have the same length.'
-        super(CosineRestartLrUpdaterHook, self).__init__(**kwargs)
-
-        self.cumulative_periods = [
-            sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))
-        ]
-
-    def get_lr(self, runner, base_lr):
-        if self.by_epoch:
-            progress = runner.epoch
-        else:
-            progress = runner.iter
-
-        if self.min_lr_ratio is not None:
-            target_lr = base_lr * self.min_lr_ratio
-        else:
-            target_lr = self.min_lr
-
-        idx = get_position_from_periods(progress, self.cumulative_periods)
-        current_weight = self.restart_weights[idx]
-        nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1]
-        current_periods = self.periods[idx]
-
-        alpha = min((progress - nearest_restart) / current_periods, 1)
-        return annealing_cos(base_lr, target_lr, alpha, current_weight)
-
-
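A sketch of how the restart bookkeeping plays out (the values are made up for illustration):

# hypothetical settings: three cycles of growing length
periods = [40, 80, 120]
restart_weights = [1.0, 0.5, 0.25]   # each restart begins at a damped lr
# __init__ computes the running sums used to locate the current cycle:
cumulative_periods = [sum(periods[0:i + 1]) for i in range(len(periods))]
assert cumulative_periods == [40, 120, 240]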
-def get_position_from_periods(iteration, cumulative_periods):
-    """Get the position from a period list.
-
-    It will return the index of the right-closest number in the period list.
-    For example, given cumulative_periods = [100, 200, 300, 400]:
-        if iteration == 50, return 0;
-        if iteration == 210, return 2;
-        if iteration == 300, return 3.
-
-    Args:
-        iteration (int): Current iteration.
-        cumulative_periods (list[int]): Cumulative period list.
-
-    Returns:
-        int: The position of the right-closest number in the period list.
-    """
-    for i, period in enumerate(cumulative_periods):
-        if iteration < period:
-            return i
-    raise ValueError(f'Current iteration {iteration} exceeds '
-                     f'cumulative_periods {cumulative_periods}')
-
-
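The docstring examples can be checked directly against the implementation:

assert get_position_from_periods(50, [100, 200, 300, 400]) == 0
assert get_position_from_periods(210, [100, 200, 300, 400]) == 2
assert get_position_from_periods(300, [100, 200, 300, 400]) == 3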
-@HOOKS.register_module()
-class CyclicLrUpdaterHook(LrUpdaterHook):
-    """Cyclic LR Scheduler.
-
-    Implement the cyclical learning rate policy (CLR) described in
-    https://arxiv.org/pdf/1506.01186.pdf
-
-    Different from the original paper, we use cosine annealing rather than
-    triangular policy inside a cycle. This improves the performance in the
-    3D detection area.
-
-    Args:
-        by_epoch (bool): Whether to update LR by epoch.
-        target_ratio (tuple[float]): Relative ratio of the highest LR and the
-            lowest LR to the initial LR.
-        cyclic_times (int): Number of cycles during training.
-        step_ratio_up (float): The ratio of the increasing process of LR in
-            the total cycle.
-        anneal_strategy (str): {'cos', 'linear'}
-            Specifies the annealing strategy: 'cos' for cosine annealing,
-            'linear' for linear annealing. Default: 'cos'.
-    """
-
-    def __init__(self,
-                 by_epoch=False,
-                 target_ratio=(10, 1e-4),
-                 cyclic_times=1,
-                 step_ratio_up=0.4,
-                 anneal_strategy='cos',
-                 **kwargs):
-        if isinstance(target_ratio, float):
-            target_ratio = (target_ratio, target_ratio / 1e5)
-        elif isinstance(target_ratio, tuple):
-            target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \
-                if len(target_ratio) == 1 else target_ratio
-        else:
-            raise ValueError('target_ratio should be either float '
-                             f'or tuple, got {type(target_ratio)}')
-
-        assert len(target_ratio) == 2, \
-            '"target_ratio" must be list or tuple of two floats'
-        assert 0 <= step_ratio_up < 1.0, \
-            '"step_ratio_up" must be in range [0,1)'
-
-        self.target_ratio = target_ratio
-        self.cyclic_times = cyclic_times
-        self.step_ratio_up = step_ratio_up
-        self.lr_phases = []  # init lr_phases
-        # validate anneal_strategy
-        if anneal_strategy not in ['cos', 'linear']:
-            raise ValueError('anneal_strategy must be one of "cos" or '
-                             f'"linear", instead got {anneal_strategy}')
-        elif anneal_strategy == 'cos':
-            self.anneal_func = annealing_cos
-        elif anneal_strategy == 'linear':
-            self.anneal_func = annealing_linear
-
-        assert not by_epoch, \
-            'currently only support "by_epoch" = False'
-        super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs)
-
-    def before_run(self, runner):
-        super(CyclicLrUpdaterHook, self).before_run(runner)
-        # initiate lr_phases
-        # total lr_phases are separated as up and down
-        max_iter_per_phase = runner.max_iters // self.cyclic_times
-        iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
-        self.lr_phases.append(
-            [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
-        self.lr_phases.append([
-            iter_up_phase, max_iter_per_phase, max_iter_per_phase,
-            self.target_ratio[0], self.target_ratio[1]
-        ])
-
-    def get_lr(self, runner, base_lr):
-        curr_iter = runner.iter
-        for (start_iter, end_iter, max_iter_per_phase, start_ratio,
-             end_ratio) in self.lr_phases:
-            curr_iter %= max_iter_per_phase
-            if start_iter <= curr_iter < end_iter:
-                progress = curr_iter - start_iter
-                return self.anneal_func(base_lr * start_ratio,
-                                        base_lr * end_ratio,
-                                        progress / (end_iter - start_iter))
-
-
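A worked example of the phase construction in before_run (the numbers are illustrative):

max_iters, cyclic_times, step_ratio_up = 1000, 2, 0.4
max_iter_per_phase = max_iters // cyclic_times            # 500
iter_up_phase = int(step_ratio_up * max_iter_per_phase)   # 200
# lr_phases == [[0, 200, 500, 1, 10], [200, 500, 500, 10, 1e-4]]
# with the default target_ratio=(10, 1e-4): in every 500-iteration cycle
# the lr ramps from base_lr to 10 * base_lr over 200 iterations, then
# anneals down to 1e-4 * base_lr.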
-@HOOKS.register_module()
-class OneCycleLrUpdaterHook(LrUpdaterHook):
-    """One Cycle LR Scheduler.
-
-    The 1cycle learning rate policy changes the learning rate after every
-    batch. The one cycle learning rate policy is described in
-    https://arxiv.org/pdf/1708.07120.pdf
-
-    Args:
-        max_lr (float or list): Upper learning rate boundaries in the cycle
-            for each parameter group.
-        total_steps (int, optional): The total number of steps in the cycle.
-            Note that if a value is not provided here, it will be the max_iter
-            of runner. Default: None.
-        pct_start (float): The percentage of the cycle (in number of steps)
-            spent increasing the learning rate.
-            Default: 0.3
-        anneal_strategy (str): {'cos', 'linear'}
-            Specifies the annealing strategy: 'cos' for cosine annealing,
-            'linear' for linear annealing.
-            Default: 'cos'
-        div_factor (float): Determines the initial learning rate via
-            initial_lr = max_lr/div_factor
-            Default: 25
-        final_div_factor (float): Determines the minimum learning rate via
-            min_lr = initial_lr/final_div_factor
-            Default: 1e4
-        three_phase (bool): If three_phase is True, use a third phase of the
-            schedule to annihilate the learning rate according to
-            final_div_factor instead of modifying the second phase (the first
-            two phases will be symmetrical about the step indicated by
-            pct_start).
-            Default: False
-    """
-
-    def __init__(self,
-                 max_lr,
-                 total_steps=None,
-                 pct_start=0.3,
-                 anneal_strategy='cos',
-                 div_factor=25,
-                 final_div_factor=1e4,
-                 three_phase=False,
-                 **kwargs):
-        # validate by_epoch, currently only support by_epoch = False
-        if 'by_epoch' not in kwargs:
-            kwargs['by_epoch'] = False
-        else:
-            assert not kwargs['by_epoch'], \
-                'currently only support "by_epoch" = False'
-        if not isinstance(max_lr, (numbers.Number, list, dict)):
-            raise ValueError('the type of max_lr must be number, list or '
-                             f'dict, but got {type(max_lr)}')
-        self._max_lr = max_lr
-        if total_steps is not None:
-            if not isinstance(total_steps, int):
-                raise ValueError('the type of total_steps must be int, but '
-                                 f'got {type(total_steps)}')
-            self.total_steps = total_steps
-        # validate pct_start
-        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
-            raise ValueError('expected float between 0 and 1 pct_start, but '
-                             f'got {pct_start}')
-        self.pct_start = pct_start
-        # validate anneal_strategy
-        if anneal_strategy not in ['cos', 'linear']:
-            raise ValueError('anneal_strategy must be one of "cos" or '
-                             f'"linear", instead got {anneal_strategy}')
-        elif anneal_strategy == 'cos':
-            self.anneal_func = annealing_cos
-        elif anneal_strategy == 'linear':
-            self.anneal_func = annealing_linear
-        self.div_factor = div_factor
-        self.final_div_factor = final_div_factor
-        self.three_phase = three_phase
-        self.lr_phases = []  # init lr_phases
-        super(OneCycleLrUpdaterHook, self).__init__(**kwargs)
-
-    def before_run(self, runner):
-        if hasattr(self, 'total_steps'):
-            total_steps = self.total_steps
-        else:
-            total_steps = runner.max_iters
-        if total_steps < runner.max_iters:
-            raise ValueError(
-                'The total steps must be greater than or equal to max '
-                f'iterations {runner.max_iters} of runner, but total steps '
-                f'is {total_steps}.')
-
-        if isinstance(runner.optimizer, dict):
-            self.base_lr = {}
-            for k, optim in runner.optimizer.items():
-                _max_lr = format_param(k, optim, self._max_lr)
-                self.base_lr[k] = [lr / self.div_factor for lr in _max_lr]
-                for group, lr in zip(optim.param_groups, self.base_lr[k]):
-                    group.setdefault('initial_lr', lr)
-        else:
-            k = type(runner.optimizer).__name__
-            _max_lr = format_param(k, runner.optimizer, self._max_lr)
-            self.base_lr = [lr / self.div_factor for lr in _max_lr]
-            for group, lr in zip(runner.optimizer.param_groups, self.base_lr):
-                group.setdefault('initial_lr', lr)
-
-        if self.three_phase:
-            self.lr_phases.append(
-                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
-            self.lr_phases.append([
-                float(2 * self.pct_start * total_steps) - 2, self.div_factor, 1
-            ])
-            self.lr_phases.append(
-                [total_steps - 1, 1, 1 / self.final_div_factor])
-        else:
-            self.lr_phases.append(
-                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
-            self.lr_phases.append(
-                [total_steps - 1, self.div_factor, 1 / self.final_div_factor])
-
-    def get_lr(self, runner, base_lr):
-        curr_iter = runner.iter
-        start_iter = 0
-        for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases):
-            if curr_iter <= end_iter:
-                pct = (curr_iter - start_iter) / (end_iter - start_iter)
-                lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr,
-                                      pct)
-                break
-            start_iter = end_iter
-        return lr
-
-
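A minimal config sketch, again assuming the usual policy-name resolution; the values are illustrative, and the derived quantities follow from the docstring formulas:

lr_config = dict(
    policy='OneCycle',
    max_lr=0.01,           # peak lr; initial_lr = max_lr / div_factor = 4e-4
    pct_start=0.3,         # 30% of steps spent ramping up
    div_factor=25,
    final_div_factor=1e4)  # floor: initial_lr / 1e4 = 4e-8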
-def annealing_cos(start, end, factor, weight=1):
-    """Calculate annealing cos learning rate.
-
-    Cosine anneal from `weight * start + (1 - weight) * end` to `end` as
-    percentage goes from 0.0 to 1.0.
-
-    Args:
-        start (float): The starting learning rate of the cosine annealing.
-        end (float): The ending learning rate of the cosine annealing.
-        factor (float): The coefficient of `pi` when calculating the current
-            percentage. Range from 0.0 to 1.0.
-        weight (float, optional): The combination factor of `start` and `end`
-            when calculating the actual starting learning rate. Default to 1.
-    """
-    cos_out = cos(pi * factor) + 1
-    return end + 0.5 * weight * (start - end) * cos_out
-
-
-def annealing_linear(start, end, factor):
-    """Calculate annealing linear learning rate.
-
-    Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.
-
-    Args:
-        start (float): The starting learning rate of the linear annealing.
-        end (float): The ending learning rate of the linear annealing.
-        factor (float): The annealing progress as a percentage.
-            Range from 0.0 to 1.0.
-    """
-    return start + (end - start) * factor
-
-
-def format_param(name, optim, param):
-    if isinstance(param, numbers.Number):
-        return [param] * len(optim.param_groups)
-    elif isinstance(param, (list, tuple)):  # multi param groups
-        if len(param) != len(optim.param_groups):
-            raise ValueError(f'expected {len(optim.param_groups)} '
-                             f'values for {name}, got {len(param)}')
-        return param
-    else:  # multi optimizers
-        if name not in param:
-            raise KeyError(f'{name} is not found in {param.keys()}')
-        return param[name]
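A quick numeric sanity check of the two helpers, assuming they are in scope:

assert annealing_cos(1.0, 0.0, 0.0) == 1.0              # factor 0 -> start
assert abs(annealing_cos(1.0, 0.0, 0.5) - 0.5) < 1e-12  # halfway point
assert annealing_cos(1.0, 0.0, 1.0) == 0.0              # factor 1 -> end
assert annealing_linear(1.0, 0.0, 0.25) == 0.75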
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/log_buffer.py
DELETED
@@ -1,41 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from collections import OrderedDict
-
-import numpy as np
-
-
-class LogBuffer:
-
-    def __init__(self):
-        self.val_history = OrderedDict()
-        self.n_history = OrderedDict()
-        self.output = OrderedDict()
-        self.ready = False
-
-    def clear(self):
-        self.val_history.clear()
-        self.n_history.clear()
-        self.clear_output()
-
-    def clear_output(self):
-        self.output.clear()
-        self.ready = False
-
-    def update(self, vars, count=1):
-        assert isinstance(vars, dict)
-        for key, var in vars.items():
-            if key not in self.val_history:
-                self.val_history[key] = []
-                self.n_history[key] = []
-            self.val_history[key].append(var)
-            self.n_history[key].append(count)
-
-    def average(self, n=0):
-        """Average latest n values or all values."""
-        assert n >= 0
-        for key in self.val_history:
-            values = np.array(self.val_history[key][-n:])
-            nums = np.array(self.n_history[key][-n:])
-            avg = np.sum(values * nums) / np.sum(nums)
-            self.output[key] = avg
-        self.ready = True
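A minimal usage sketch of the buffer's count-weighted averaging:

buf = LogBuffer()
buf.update({'loss': 0.9}, count=2)   # e.g. a batch of 2 samples
buf.update({'loss': 0.5}, count=2)
buf.average()                        # weighted mean over the full history
assert abs(buf.output['loss'] - 0.7) < 1e-9 and buf.ready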
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/__init__.py
DELETED
@@ -1,19 +0,0 @@
-from .ade import ADE20KDataset
-from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
-from .chase_db1 import ChaseDB1Dataset
-from .cityscapes import CityscapesDataset
-from .custom import CustomDataset
-from .dataset_wrappers import ConcatDataset, RepeatDataset
-from .drive import DRIVEDataset
-from .hrf import HRFDataset
-from .pascal_context import PascalContextDataset, PascalContextDataset59
-from .stare import STAREDataset
-from .voc import PascalVOCDataset
-
-__all__ = [
-    'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
-    'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset',
-    'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset',
-    'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset',
-    'STAREDataset'
-]
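As a usage sketch, datasets in this package are built from config dicts through the DATASETS registry; the keys and paths below are illustrative assumptions (a real config would also carry a non-empty data pipeline):

# hypothetical config; paths and keys are placeholders
cfg = dict(
    type='CityscapesDataset',
    data_root='data/cityscapes',
    img_dir='leftImg8bit/train',
    ann_dir='gtFine/train',
    pipeline=[])
dataset = build_dataset(cfg)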
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/__init__.py
DELETED
@@ -1,2 +0,0 @@
-"""For modules related to installing packages.
-"""
spaces/Audio-AGI/AudioSep/pipeline.py
DELETED
@@ -1,67 +0,0 @@
-import yaml
-from typing import Dict, List
-import torch
-import torch.nn as nn
-import numpy as np
-import librosa
-from scipy.io.wavfile import write
-from utils import ignore_warnings; ignore_warnings()
-from utils import parse_yaml, load_ss_model
-from models.clap_encoder import CLAP_Encoder
-
-
-def build_audiosep(config_yaml, checkpoint_path, device):
-    configs = parse_yaml(config_yaml)
-
-    query_encoder = CLAP_Encoder().eval()
-    model = load_ss_model(
-        configs=configs,
-        checkpoint_path=checkpoint_path,
-        query_encoder=query_encoder
-    ).eval().to(device)
-
-    print(f'Load AudioSep model from [{checkpoint_path}]')
-    return model
-
-
-def inference(model, audio_file, text, output_file, device='cuda'):
-    print(f'Separate audio from [{audio_file}] with textual query [{text}]')
-    mixture, fs = librosa.load(audio_file, sr=32000, mono=True)
-    with torch.no_grad():
-        text = [text]
-
-        conditions = model.query_encoder.get_query_embed(
-            modality='text',
-            text=text,
-            device=device
-        )
-
-        input_dict = {
-            "mixture": torch.Tensor(mixture)[None, None, :].to(device),
-            "condition": conditions,
-        }
-
-        sep_segment = model.ss_model.chunk_inference(input_dict)
-
-        write(output_file, 32000, np.round(sep_segment * 32767).astype(np.int16))
-        print(f'Write separated audio to [{output_file}]')
-
-
-if __name__ == '__main__':
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    model = build_audiosep(
-        config_yaml='config/audiosep_base.yaml',
-        checkpoint_path='checkpoint/step=3920000.ckpt',
-        device=device)
-
-    audio_file = '/mnt/bn/data-xubo/project/AudioShop/YT_audios/Y3VHpLxtd498.wav'
-    text = 'pigeons are cooing in the background'
-    output_file = 'separated_audio.wav'
-
-    inference(model, audio_file, text, output_file, device)
-
-
-
-
-
-
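One detail worth noting from inference above: scipy's wavfile.write is handed integer PCM, so the float waveform in [-1, 1] is scaled to the 16-bit range first. A standalone illustration of that conversion:

import numpy as np
x = np.array([-1.0, 0.0, 0.5, 1.0])          # float waveform in [-1, 1]
pcm = np.round(x * 32767).astype(np.int16)   # [-32767, 0, 16384, 32767]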
spaces/BMukhtar/facemaskDetector/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: FacemaskDetector
-emoji: 🦀
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Bart92/RVC_HF/infer/modules/ipex/__init__.py.py
DELETED
@@ -1,165 +0,0 @@
-import os
-import sys
-import contextlib
-import torch
-import intel_extension_for_pytorch as ipex  # pylint: disable=import-error, unused-import
-from .hijacks import ipex_hijacks
-from .attention import attention_init
-
-# pylint: disable=protected-access, missing-function-docstring, line-too-long
-
-def ipex_init():  # pylint: disable=too-many-statements
-    try:
-        # Replace cuda with xpu:
-        torch.cuda.current_device = torch.xpu.current_device
-        torch.cuda.current_stream = torch.xpu.current_stream
-        torch.cuda.device = torch.xpu.device
-        torch.cuda.device_count = torch.xpu.device_count
-        torch.cuda.device_of = torch.xpu.device_of
-        torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard
-        torch.cuda.get_device_name = torch.xpu.get_device_name
-        torch.cuda.get_device_properties = torch.xpu.get_device_properties
-        torch.cuda.init = torch.xpu.init
-        torch.cuda.is_available = torch.xpu.is_available
-        torch.cuda.is_initialized = torch.xpu.is_initialized
-        torch.cuda.is_current_stream_capturing = lambda: False
-        torch.cuda.set_device = torch.xpu.set_device
-        torch.cuda.stream = torch.xpu.stream
-        torch.cuda.synchronize = torch.xpu.synchronize
-        torch.cuda.Event = torch.xpu.Event
-        torch.cuda.Stream = torch.xpu.Stream
-        torch.cuda.FloatTensor = torch.xpu.FloatTensor
-        torch.Tensor.cuda = torch.Tensor.xpu
-        torch.Tensor.is_cuda = torch.Tensor.is_xpu
-        torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock
-        torch.cuda._initialized = torch.xpu.lazy_init._initialized
-        torch.cuda._lazy_seed_tracker = torch.xpu.lazy_init._lazy_seed_tracker
-        torch.cuda._queued_calls = torch.xpu.lazy_init._queued_calls
-        torch.cuda._tls = torch.xpu.lazy_init._tls
-        torch.cuda.threading = torch.xpu.lazy_init.threading
-        torch.cuda.traceback = torch.xpu.lazy_init.traceback
-        torch.cuda.Optional = torch.xpu.Optional
-        torch.cuda.__cached__ = torch.xpu.__cached__
-        torch.cuda.__loader__ = torch.xpu.__loader__
-        torch.cuda.ComplexFloatStorage = torch.xpu.ComplexFloatStorage
-        torch.cuda.Tuple = torch.xpu.Tuple
-        torch.cuda.streams = torch.xpu.streams
-        torch.cuda._lazy_new = torch.xpu._lazy_new
-        torch.cuda.FloatStorage = torch.xpu.FloatStorage
-        torch.cuda.Any = torch.xpu.Any
-        torch.cuda.__doc__ = torch.xpu.__doc__
-        torch.cuda.default_generators = torch.xpu.default_generators
-        torch.cuda.HalfTensor = torch.xpu.HalfTensor
-        torch.cuda._get_device_index = torch.xpu._get_device_index
-        torch.cuda.__path__ = torch.xpu.__path__
-        torch.cuda.Device = torch.xpu.Device
-        torch.cuda.IntTensor = torch.xpu.IntTensor
-        torch.cuda.ByteStorage = torch.xpu.ByteStorage
-        torch.cuda.set_stream = torch.xpu.set_stream
-        torch.cuda.BoolStorage = torch.xpu.BoolStorage
-        torch.cuda.os = torch.xpu.os
-        torch.cuda.torch = torch.xpu.torch
-        torch.cuda.BFloat16Storage = torch.xpu.BFloat16Storage
-        torch.cuda.Union = torch.xpu.Union
-        torch.cuda.DoubleTensor = torch.xpu.DoubleTensor
-        torch.cuda.ShortTensor = torch.xpu.ShortTensor
-        torch.cuda.LongTensor = torch.xpu.LongTensor
-        torch.cuda.IntStorage = torch.xpu.IntStorage
-        torch.cuda.LongStorage = torch.xpu.LongStorage
-        torch.cuda.__annotations__ = torch.xpu.__annotations__
-        torch.cuda.__package__ = torch.xpu.__package__
-        torch.cuda.__builtins__ = torch.xpu.__builtins__
-        torch.cuda.CharTensor = torch.xpu.CharTensor
-        torch.cuda.List = torch.xpu.List
-        torch.cuda._lazy_init = torch.xpu._lazy_init
-        torch.cuda.BFloat16Tensor = torch.xpu.BFloat16Tensor
-        torch.cuda.DoubleStorage = torch.xpu.DoubleStorage
-        torch.cuda.ByteTensor = torch.xpu.ByteTensor
-        torch.cuda.StreamContext = torch.xpu.StreamContext
-        torch.cuda.ComplexDoubleStorage = torch.xpu.ComplexDoubleStorage
-        torch.cuda.ShortStorage = torch.xpu.ShortStorage
-        torch.cuda._lazy_call = torch.xpu._lazy_call
-        torch.cuda.HalfStorage = torch.xpu.HalfStorage
-        torch.cuda.random = torch.xpu.random
-        torch.cuda._device = torch.xpu._device
-        torch.cuda.classproperty = torch.xpu.classproperty
-        torch.cuda.__name__ = torch.xpu.__name__
-        torch.cuda._device_t = torch.xpu._device_t
-        torch.cuda.warnings = torch.xpu.warnings
-        torch.cuda.__spec__ = torch.xpu.__spec__
-        torch.cuda.BoolTensor = torch.xpu.BoolTensor
-        torch.cuda.CharStorage = torch.xpu.CharStorage
-        torch.cuda.__file__ = torch.xpu.__file__
-        torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork
-        # torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing
-
-        # Memory:
-        torch.cuda.memory = torch.xpu.memory
-        if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read():
-            torch.xpu.empty_cache = lambda: None
-        torch.cuda.empty_cache = torch.xpu.empty_cache
-        torch.cuda.memory_stats = torch.xpu.memory_stats
-        torch.cuda.memory_summary = torch.xpu.memory_summary
-        torch.cuda.memory_snapshot = torch.xpu.memory_snapshot
-        torch.cuda.memory_allocated = torch.xpu.memory_allocated
-        torch.cuda.max_memory_allocated = torch.xpu.max_memory_allocated
-        torch.cuda.memory_reserved = torch.xpu.memory_reserved
-        torch.cuda.memory_cached = torch.xpu.memory_reserved
-        torch.cuda.max_memory_reserved = torch.xpu.max_memory_reserved
-        torch.cuda.max_memory_cached = torch.xpu.max_memory_reserved
-        torch.cuda.reset_peak_memory_stats = torch.xpu.reset_peak_memory_stats
-        torch.cuda.reset_max_memory_cached = torch.xpu.reset_peak_memory_stats
-        torch.cuda.reset_max_memory_allocated = torch.xpu.reset_peak_memory_stats
-        torch.cuda.memory_stats_as_nested_dict = torch.xpu.memory_stats_as_nested_dict
-        torch.cuda.reset_accumulated_memory_stats = torch.xpu.reset_accumulated_memory_stats
-
-        # RNG:
-        torch.cuda.get_rng_state = torch.xpu.get_rng_state
-        torch.cuda.get_rng_state_all = torch.xpu.get_rng_state_all
-        torch.cuda.set_rng_state = torch.xpu.set_rng_state
-        torch.cuda.set_rng_state_all = torch.xpu.set_rng_state_all
-        torch.cuda.manual_seed = torch.xpu.manual_seed
-        torch.cuda.manual_seed_all = torch.xpu.manual_seed_all
-        torch.cuda.seed = torch.xpu.seed
-        torch.cuda.seed_all = torch.xpu.seed_all
-        torch.cuda.initial_seed = torch.xpu.initial_seed
-
-        # AMP:
-        torch.cuda.amp = torch.xpu.amp
-        if not hasattr(torch.cuda.amp, "common"):
-            torch.cuda.amp.common = contextlib.nullcontext()
-        torch.cuda.amp.common.amp_definitely_not_available = lambda: False
-        try:
-            torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
-        except Exception:  # pylint: disable=broad-exception-caught
-            try:
-                from .gradscaler import gradscaler_init  # pylint: disable=import-outside-toplevel, import-error
-                gradscaler_init()
-                torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
-            except Exception:  # pylint: disable=broad-exception-caught
-                torch.cuda.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler
-
-        # C
-        torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentStream
-        ipex._C._DeviceProperties.major = 2023
-        ipex._C._DeviceProperties.minor = 2
-
-        # Fix functions with ipex:
-        torch.cuda.mem_get_info = lambda device=None: [(torch.xpu.get_device_properties(device).total_memory - torch.xpu.memory_allocated(device)), torch.xpu.get_device_properties(device).total_memory]
-        torch._utils._get_available_device_type = lambda: "xpu"
-        torch.has_cuda = True
-        torch.cuda.has_half = True
-        torch.cuda.is_bf16_supported = lambda *args, **kwargs: True
-        torch.cuda.is_fp16_supported = lambda *args, **kwargs: True
-        torch.version.cuda = "11.7"
-        torch.cuda.get_device_capability = lambda *args, **kwargs: [11, 7]
-        torch.cuda.get_device_properties.major = 11
-        torch.cuda.get_device_properties.minor = 7
-        torch.cuda.ipc_collect = lambda *args, **kwargs: None
-        torch.cuda.utilization = lambda *args, **kwargs: 0
-
-        ipex_hijacks()
-        attention_init()
-    except Exception as e:
-        return False, e
-    return True, None
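Callers are expected to consume the (success, error) pair that ipex_init returns; a minimal sketch:

ok, err = ipex_init()
if not ok:
    print(f'IPEX CUDA-to-XPU aliasing failed, falling back to stock PyTorch: {err}')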
spaces/Benson/text-generation/Examples/Apk Mod 8 Piscina De Bolas 5.11.2.md
DELETED
@@ -1,151 +0,0 @@
-
-<h1>Mod APK 8 Ball Pool 5.11.2: How to Download and Play the Best Pool Game on Android</h1>
-<p>Do you like playing pool games on your smartphone? If so, you must have heard of <strong>8 Ball Pool</strong>, the most popular and addictive pool game on Android. But did you know there is a way to make this game even more fun and exciting? Yes, we are talking about <strong>mod apk 8 ball pool 5.11.2</strong>, the latest version of the modified app that gives you unlimited coins, cash, cues, and more.</p>
-<h2>mod apk 8 ball pool 5.11.2</h2><br /><p><b><b>DOWNLOAD</b> ⚡ <a href="https://bltlly.com/2v6MZ1">https://bltlly.com/2v6MZ1</a></b></p><br /><br />
-<p>In this article, we will tell you everything you need to know about mod apk 8 ball pool 5.11.2, including what it is, how to download and install it, and how to play it. We will also compare it with the original version of the game and highlight its pros and cons. So, without further ado, let's dive in!</p>
-<h2>What is 8 Ball Pool?</h2>
-<p>Before talking about mod apk 8 ball pool 5.11.2, let's first understand what <strong>8 Ball Pool</strong> is and why it is so popular among millions of players around the world.</p>
-<h3>The features and gameplay of 8 Ball Pool</h3>
-<p>8 Ball Pool is a realistic and immersive pool game that lets you play online with your friends or other players from all over the world. You can choose from different game modes, such as <strong>1-on-1 matches</strong>, <strong>tournaments</strong>, <strong>9-ball games</strong>, or <strong>practice mode</strong>. You can also customize your cue, table, avatar, chat phrases, and more.</p>
-<p></p>
-<p>The gameplay of 8 Ball Pool is simple and intuitive. You just swipe your finger on the screen to aim the cue, adjust the power, and release to hit the ball. You can also use the spin feature to add some curve or angle to your shots. The goal is to pocket all your balls before your opponent does, following the standard rules of pool.</p>
-<h3>The benefits and drawbacks of playing 8 Ball Pool</h3>
-<p>Playing 8 Ball Pool can have many benefits for you, such as:</p>
-<ul>
-<li><strong>Improving your skills</strong>: Playing pool games can help you improve your concentration, accuracy, strategy, and knowledge of physics.</li>
-<li><strong>Making new friends</strong>: Playing online with other players can help you socialize, chat, and make new friends from different countries and cultures.</li>
-<li><strong>Earning rewards</strong>: Playing matches and tournaments can help you earn coins, cash, cues, trophies, and other rewards that you can use to upgrade your game.</li>
-</ul>
-<p>However, playing 8 Ball Pool can also have some drawbacks, such as:</p>
-<ul>
-<li><strong>Spending too much money</strong>: Playing 8 Ball Pool can tempt you to spend real money to buy coins, cash, cues, or other items that can give you an edge over your opponents. However, this can be risky and wasteful, as you may not get the value you expect, or you may lose your account due to hacking or a ban.</li>
-<li><strong>Becoming addicted</strong>: Playing 8 Ball Pool can be very addictive, as you may want to play more and more matches to earn more rewards, climb the rankings, or beat your rivals. However, this can be harmful to your health, productivity, and relationships, as you may neglect your other responsibilities and hobbies.</li>
-<li><strong>Facing unfair competition</strong>: Playing 8 Ball Pool can be frustrating and unfair, as you may face opponents who use cheats, hacks, mods, or bots to win games easily and unfairly. This can ruin your gaming experience and make you lose your motivation and confidence.</li>
-</ul>
-<p>So, how can you enjoy 8 Ball Pool without facing these drawbacks? Well, one possible solution is to use mod apk 8 ball pool 5.11.2.</p>
-<h2>What is mod apk 8 ball pool 5.11.2?</h2>
-<p>Mod apk 8 ball pool 5.11.2 is a modified version of the original 8 Ball Pool app that gives you access to unlimited resources and features that are not available in the official game. It is a third-party app created by hackers or developers who modify the original code of the game and add new functions and elements.</p>
-<h3>The differences between the original and the modded version of 8 Ball Pool</h3>
-<p>The main differences between the original and the modded version of 8 Ball Pool are:</p>
-<table>
-<tr>
-<th>Original version</th>
-<th>Modded version</th>
-</tr>
-<tr>
-<td>Limited coins and cash</td>
-<td>Unlimited coins and cash</td>
-</tr>
-<tr>
-<td>Basic cues and tables</td>
-<td>Premium cues and tables</td>
-</tr>
-<tr>
-<td>Normal gameplay and difficulty</td>
-<td>Easy gameplay and difficulty</td>
-</tr>
-<tr>
-<td>No extra features or options</td>
-<td>Many extra features and options</td>
-</tr>
-<tr>
-<td>Safe and secure</td>
-<td>Risky and insecure</td>
-</tr>
-</table>
-<h3>The advantages and disadvantages of using mod apk 8 ball pool 5.11.2</h3>
-<p>Using mod apk 8 ball pool 5.11.2 can have some advantages and disadvantages, such as:</p>
-<ul>
-<li><strong>Advantages</strong>: <ul>
-<li>You can enjoy unlimited coins and cash that you can use to buy whatever you want in the game.</li>
-<li>You can use premium cues and tables that have better stats and designs than the basic ones.</li>
-<li>You can play easier games and win more matches without much effort or skill.</li>
-<li>You can access many extra features and options that can enhance your gaming experience and fun.</li>
-</ul>
-</li>
-<li><strong>Disadvantages</strong>: <ul>
-<li>You may face legal issues or penalties for violating the terms and conditions of the original game.</li>
-<li>You may lose your account or progress if the modded app is detected or banned by the game's developers or authorities.</li>
-<li>You may expose your device or data to malware or viruses that can harm your security or privacy.</li>
-<li>You may lose the challenge and excitement of playing fair and competitive games with real players.</li>
-</ul>
-</li>
-</ul>
-
-<h2>How to download and install mod apk 8 ball pool 5.11.2?</h2>
-<p>If you have decided to try mod apk 8 ball pool 5.11.2, you need to follow some steps to download and install it on your Android device. Here are the steps:</p>
-<h3>The steps to download mod apk 8 ball pool 5.11.2 from a reliable source</h3>
-<ol>
-<li>First, you need to find a reliable source that offers mod apk 8 ball pool 5.11.2 for free download. You can search on Google or other search engines for keywords like "mod apk 8 ball pool 5.11.2 download" or "mod apk 8 ball pool 5.11.2 free download". However, you should be careful and avoid any suspicious or fake links that may contain malware or viruses.</li>
-<li>Second, you need to choose a trustworthy and reputable source that has positive reviews and ratings from other users. You can also check the comments and feedback from other users who have downloaded and used mod apk 8 ball pool 5.11.2 from that source. You can also verify the authenticity and safety of the source using tools like VirusTotal or Malwarebytes.</li>
-<li>Third, you need to click on the download link or button provided by the source and wait for the mod apk file to be downloaded to your device. You may need to allow some permissions or enable some settings for the download process to go smoothly.</li>
-</ol>
-<h3>The precautions and tips to avoid malware and viruses when downloading mod apk 8 ball pool 5.11.2</h3>
-<p>Downloading mod apk 8 ball pool 5.11.2 can be risky and dangerous, as it may expose your device or data to malware or viruses that can harm your security or privacy. Therefore, you need to take some precautions and follow some tips to avoid malware and viruses when downloading mod apk 8 ball pool 5.11.2, such as:</p>
-<ul>
-
-<li><strong>Use an antivirus</strong>: Using an antivirus can help you scan for and detect any malware or viruses that may be hidden in the mod apk file or on the source website. It can also help you remove or quarantine any malicious files or programs that may infect your device or data.</li>
-<li><strong>Use a backup</strong>: Using a backup can help you save and restore your device or data in case of any damage or loss caused by malware or viruses. You can use a cloud service, an external storage device, or a recovery tool to back up your device or data regularly.</li>
-</ul>
-<h3>The instructions to install and run mod apk 8 ball pool 5.11.2 on your Android device</h3>
-<ol>
-<li>First, you need to uninstall the original version of 8 Ball Pool from your device if you already have it installed. This is because the modded version may not work properly or may conflict with the original version.</li>
-<li>Second, you need to enable the installation of apps from unknown sources on your device. This is because mod apk 8 ball pool 5.11.2 is not available on the official Google Play Store and is considered an unknown source by your device. To enable this option, go to Settings > Security > Unknown sources and turn it on.</li>
-<li>Third, you need to locate the mod apk file you downloaded on your device and tap on it to start the installation process. You may need to follow some instructions or accept some terms and conditions to complete the installation.</li>
-<li>Fourth, you need to launch the modded app from the app drawer or home screen and enjoy playing mod apk 8 ball pool 5.11.2 with unlimited resources and features.</li>
-</ol>
-<h2>How to play mod apk 8 ball pool 5.11.2?</h2>
-
-<h3>The basic rules and controls of mod apk 8 ball pool 5.11.2</h3>
-<p>The basic rules and controls of mod apk 8 ball pool 5.11.2 are:</p>
-<ul>
-<li>You can play online with your friends or other players from all over the world in different game modes, such as <strong>1-on-1 matches</strong>, <strong>tournaments</strong>, <strong>9-ball games</strong>, or <strong>practice mode</strong>.</li>
-<li>You can swipe your finger on the screen to aim your cue, adjust the power, and release to hit the ball. You can also use the spin feature to add some curve or angle to your shots.</li>
-<li>You can pocket all your balls before your opponent does, following the standard rules of pool.</li>
-<li>You can customize your cue, table, avatar, chat phrases, and more with the unlimited coins and cash you have in the modded app.</li>
-<li>You can use premium cues and tables that have better stats and designs than the basic ones. You can also unlock and use exclusive cues and tables that are not available in the original game.</li>
-<li>You can play easier games and win more matches without much effort or skill. You can also use cheats, hacks, mods, or bots to win games easily and unfairly.</li>
-<li>You can access many extra features and options that can enhance your gaming experience and fun. For example, you can use the auto-win feature to win any game instantly, the long-line feature to extend your aiming line, the anti-ban feature to avoid detection or bans, and more.</li>
-</ul>
-<h3>The modes and challenges of mod apk 8 ball pool 5.11.2</h3>
-<p>The modes and challenges of mod apk 8 ball pool 5.11.2 are:</p>
-<ul>
-<li><strong>1-on-1 matches</strong>: You can play against another player in a single game of 8-ball pool. You can choose the bet amount, the table, and the rules. The winner takes all the coins and trophies.</li>
-
-<li><strong>9-ball games</strong>: You can play against another player in a single game of 9-ball pool. You have to pocket the balls in numerical order, from 1 to 9. The first player to pocket the 9-ball wins the game.</li>
-<li><strong>Practice mode</strong>: You can play alone in a game of 8-ball or 9-ball pool. You can practice your skills, try out different cues and tables, and have fun without any pressure or competition.</li>
-</ul>
-<h3>The tips and tricks to win more games and coins in mod apk 8 ball pool 5.11.2</h3>
-<p>Although mod apk 8 ball pool 5.11.2 gives you unlimited resources and features that can make your gaming experience more fun and exciting, you still need some tips and tricks to win more games and coins in mod apk 8 ball pool 5.11.2, such as:</p>
-<ul>
-<li><strong>Choose your cue wisely</strong>: Different cues have different stats, such as power, aim, spin, and time. You should choose a cue that suits your style and preference. You can also upgrade your cue with coins or cash to improve its stats.</li>
-<li><strong>Use the spin feature</strong>: The spin feature can help you add some curve or angle to your shots, which can help you avoid obstacles, create better positions, or pocket tricky balls. You can use the spin feature by tapping the white ball icon in the top right corner of the screen and dragging it to adjust the direction and intensity of the spin.</li>
-<li><strong>Plan your shots</strong>: Before hitting the ball, you should plan your shots and think about the consequences. You should consider factors such as the position of the ball, the cue angle, the power, the spin, the table layout, and the rules. You should also try to predict where the cue ball and the object balls will end up after your shot.</li>
-
-</ul>
-<h2>Conclusion</h2>
-<p>In conclusion, mod apk 8 ball pool 5.11.2 is a modified version of the original 8 Ball Pool app that gives you access to unlimited resources and features that are not available in the official game. It is a third-party app created by hackers or developers who modify the original code of the game and add new functions and elements.</p>
-<p>Mod apk 8 ball pool 5.11.2 can be a lot of fun and excitement, as it offers you many advantages, such as unlimited coins and cash, premium cues and tables, easy gameplay and difficulty, and many extra features and options. However, it can also be risky and dangerous, as it may expose you to legal issues or penalties, loss of your account or progress, malware or viruses, and unfair competition.</p>
-<p>Therefore, you should weigh the pros and cons of using mod apk 8 ball pool 5.11.2 before deciding to download and install it on your device. You should also follow the steps and tips above to download and install it safely, and learn some tips and tricks to play it effectively and enjoyably.</p>
-<p>We hope this article has helped you understand what mod apk 8 ball pool 5.11.2 is, how to download and install it, and how to play it. If you have any questions or comments, feel free to leave a comment below. Thanks for reading!</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about mod apk 8 ball pool 5.11.2:</p>
-<ol>
-<li><strong>Is mod apk 8 ball pool 5.11.2 legal?</strong>
-<p>No, mod apk 8 ball pool 5.11.2 is not legal, as it violates the terms and conditions of the original game. It is also considered piracy, as it uses the original game's content without permission or payment. Using mod apk 8 ball pool 5.11.2 may result in legal action or penalties from the game's developers or authorities.</p></li>
-<li><strong>Is mod apk 8 ball pool 5.11.2 safe?</strong>
-
-<li><strong>Is mod apk 8 ball pool 5.11.2 compatible with all Android devices?</strong>
-<p>No, mod apk 8 ball pool 5.11.2 may not be compatible with all Android devices, as it may require certain specifications or permissions that are not available on some devices. It may also cause errors or crashes on some devices due to compatibility issues. Using mod apk 8 ball pool 5.11.2 may affect your device's performance or functionality.</p></li>
-<li><strong>Can I play online with other players using mod apk 8 ball pool 5.11.2?</strong>
-<p>Yes, you can play online with other players using mod apk 8 ball pool 5.11.2, but you may face some issues, such as:</p>
-<ul>
-<li>You may not be able to join some games or rooms that are restricted to the original version of the game.</li>
-<li>You may be matched with other players who are also using mod apk 8 ball pool 5.11.2, which can make games boring or unfair.</li>
-<li>You may be reported or flagged by other players who are using the original version of the game, which can lead to detection or a ban.</li>
-</ul></li>
-<li><strong>Can I update mod apk 8 ball pool 5.11.2?</strong>
-<p>No, you cannot update mod apk 8 ball pool 5.11.2, as it is not supported by the game's developers or authorities. If you try to update it, you may lose your modded features or resources, or you may face errors or issues due to compatibility problems.</p></li>
-</ol></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Descargar 28 Semanas Despus.md
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar 28 semanas después, la aterradora secuela a 28 días después</h1>
|
3 |
-
<p>Si eres un fan de las películas de terror, probablemente hayas oído hablar de <em>28 Days Later</em>, la aclamada película británica que representa un apocalipsis zombi causado por un virus mortal. ¿Pero sabías que hay una secuela de esta película, llamada <em>28 semanas después</em>, que es aún más aterradora y emocionante? </em></p>
|
4 |
-
<h2>descargar 28 semanas después</h2><br /><p><b><b>Download Zip</b> ✦ <a href="https://bltlly.com/2v6LdK">https://bltlly.com/2v6LdK</a></b></p><br /><br />
|
5 |
-
<p><em>28 Weeks Later</em> es una película de terror post-apocalíptica dirigida por Juan Carlos Fresnadillo, quien co-escribió con Rowan Joffé, Enrique López Lavigne y Jesus Olmo. La secuela independiente de <em>28 Days Later</em>, está protagonizada por Robert Carlyle, Rose Byrne, Jeremy Renner, Harold Perrineau, Catherine McCormack, Mackintosh Muggleton, Imogen Poots e Idris Elba.</p>
|
6 |
-
<p>La película se desarrolla seis meses después de los acontecimientos de la primera película, cuando las fuerzas de la OTAN han declarado a Gran Bretaña a salvo del virus de la rabia y han comenzado a repoblar Londres. Sin embargo, las cosas van horriblemente mal cuando un portador del virus entra en la ciudad y desencadena un nuevo brote. Los sobrevivientes deben luchar por sus vidas contra las hordas infectadas y las fuerzas militares que tratan de contenerlas. </p>
|
7 |
-
<p>En este artículo, te diremos por qué deberías ver <em>28 Weeks Later</em>, donde puedes encontrarlo online, y cómo puedes descargarlo de forma legal y segura. Por lo tanto, si usted está listo para un poco de acción palpitante y suspenso, siga leyendo! </p>
|
8 |
-
<h2>Por qué deberías ver 28 Weeks Later</h2>
|
9 |
-
<p><em>28 Weeks Later</em> no es solo una película de zombis sin sentido. Es una película inteligente y bien hecha que explora temas como la supervivencia, la familia, la moralidad y la humanidad en un escenario distópico. También ofrece una descripción realista y arenosa de lo que podría suceder si una pandemia se saliera de control. </p>
|
10 |
-
<p></p>
|
11 |
-
|
12 |
-
<p>Además, <em>28 Weeks Later</em> presenta algunas de las escenas más intensas y memorables de la historia del cine de terror. Desde la secuencia de apertura donde Don escapa de una granja atacada por los infectados, a la escena de la persecución en helicóptero donde Doyle corta una multitud de zombies, a la toma final de la Torre Eiffel rodeado de infectados corriendo descontrolado en París, usted estará en el borde de su asiento durante toda la película. </p>
|
13 |
-
<p>Finalmente, <em>28 Weeks Later</em> ha recibido críticas y valoraciones positivas de críticos y audiencias por igual. Tiene una calificación de aprobación del 71% en Rotten Tomatoes, basada en 195 revisiones, con una calificación promedio de 6.6/10. También tiene una puntuación de 7/10 en IMDb, basada en 260.000 votos. La película fue elogiada por su dirección, actuación, cinematografía y atmósfera. </p>
|
14 |
-
<h2>Dónde encontrar 28 semanas más tarde en línea</h2>
|
15 |
-
<p>Si te estás preguntando dónde puedes ver <em>28 Weeks Later</em> online, tienes varias opciones para elegir. Estos son algunos de los mejores servicios de streaming y plataformas que ofrecen la película:</p>
|
16 |
-
<ul>
|
17 |
-
<li><strong>Netflix</strong>: Netflix es uno de los servicios de streaming más populares y ampliamente utilizados en el mundo. Tiene una enorme biblioteca de películas y programas, incluyendo <em>28 Weeks Later</em>. Puede ver la película en Netflix con un plan de suscripción que comienza desde $8.99 por mes. También puede descargar la película en su dispositivo y verla sin conexión. </li>
|
18 |
-
<li><strong>Hulu</strong>: Hulu es otro gran servicio de streaming que ofrece una variedad de contenido, incluyendo <em>28 Weeks Later</em>. Puedes ver la película en Hulu con un plan de suscripción que comienza desde $5.99 por mes. También puedes agregar canales de TV en vivo y redes premium a tu plan por un cargo adicional. </li>
|
19 |
-
|
20 |
-
<li><strong>iTunes</strong>: iTunes es una plataforma que te permite comprar o alquilar películas y programas de Apple. Puedes comprar <em>28 Weeks Later</em> en iTunes por $9.99 o alquilarlo por $3.99. También puedes descargar la película en tu dispositivo y verla sin conexión. </li>
|
21 |
-
<li><strong>Vudu</strong>: Vudu es una plataforma que le permite comprar o alquilar películas y programas de Walmart. Puedes comprar <em>28 semanas más tarde</em> en Vudu por $9.99 o alquilarlo por $3.99. También puedes descargar la película en tu dispositivo y verla sin conexión. </li>
|
22 |
-
</ul>
|
23 |
-
<p>Here are some of the pros and cons of each streaming service:</p>
<table>
<tr>
<th>Streaming service</th>
<th>Pros</th>
<th>Cons</th>
</tr>
<tr>
<td>Netflix</td>
<td>- Large selection of films and shows - Affordable subscription plans - Offline viewing option - No ads</td>
<td>- Content availability may vary by region - Subscription fee may increase over time - No live TV channels or premium networks</td>
</tr>
<tr>
<td>Hulu</td>
<td>- Large selection of films and shows - Affordable subscription plans - Offline viewing option - Live TV channels and premium networks available</td>
<td>- Content availability may vary by region - Subscription fee may increase over time - Ads may interrupt your viewing experience unless you pay extra</td>
</tr>
<tr>
<td>Amazon Prime Video</td>
<td>- Large selection of films and shows - Offline viewing option - No ads - Other Amazon Prime membership benefits such as free shipping, music, books, etc.</td>
<td>- Content availability may vary by region - Subscription fee may increase over time - No live TV channels or premium networks included in the membership</td>
</tr>
<tr>
<td>iTunes</td>
<td>- High-quality video and audio - Offline viewing option - No ads - Compatible with Apple devices and services</td>
<td>- Content availability may vary by region - No subscription option - Only available on Apple devices and services - No live TV channels or premium networks</td>
</tr>
<tr>
<td>Vudu</td>
<td>- High-quality video and audio - Offline viewing option - No ads - Compatible with various devices and services</td>
<td>- Content availability may vary by region - No subscription option - Only available in the United States - No live TV channels or premium networks</td>
</tr>
</table>
<h2>How to download 28 Weeks Later legally and safely</h2>
<p>Now that you know where you can watch <em>28 Weeks Later</em> online, you may be wondering how you can download it legally and safely. Downloading films online can be a tricky and risky business, as there are many illegal and unethical websites and apps offering pirated content, malware, viruses, and scams. You should therefore always be careful and cautious when downloading films online, and follow these tips:</p>
<ul>
<li><strong>Use a reputable website or app</strong>: You should always use a website or app that is licensed and authorized to offer <em>28 Weeks Later</em> for download. Some of the best websites and apps that let you download <em>28 Weeks Later</em> legally and safely are Netflix, Hulu, Amazon Prime Video, iTunes, and Vudu. These platforms have secure payment methods, encryption technology, and customer support to ensure your safety and satisfaction.</li>
<li><strong>Avoid torrents and peer-to-peer sharing</strong>: You should never use torrent or peer-to-peer sharing websites or apps to download <em>28 Weeks Later</em>, as they are illegal and unethical. Torrenting or peer-to-peer sharing involves downloading files from other users that may be infected or corrupted, which can damage your device or expose your personal information. In addition, torrenting or peer-to-peer sharing violates the intellectual-property rights of the creators and distributors of <em>28 Weeks Later</em>, which can result in legal consequences.</li>
<li><strong>Choose the right quality and format</strong>: You should always choose the right quality and format when downloading <em>28 Weeks Later</em>, as they affect your viewing experience and your device's storage. The quality of a film refers to the resolution or clarity of the image and sound, which can range from low to high. The format of a film refers to the file type or extension, which varies depending on the device or platform you use. Some of the most common quality and format options for downloading <em>28 Weeks Later</em> are SD (standard definition), HD (high definition), 4K (ultra high definition), MP4 (MPEG-4), AVI (Audio Video Interleave), MKV (Matroska), and MOV (QuickTime).</li>
<li><strong>Add subtitles if needed</strong>: You should always add subtitles if needed when downloading <em>28 Weeks Later</em>, as they can improve your understanding and enjoyment of the film. Subtitles are text versions of a film's dialogue or narration that can be displayed on screen in different languages or styles. Some of the websites and apps that offer subtitles for <em>28 Weeks Later</em> are Netflix, Hulu, Amazon Prime Video, iTunes, and Vudu. You can also download subtitles from other sources such as Subscene, OpenSubtitles, or YIFY Subtitles.</li>
</ul>
<h2>Conclusion</h2>
<p>In conclusion, <em>28 Weeks Later</em> is a must-see film for horror fans, as it is a terrifying and thrilling sequel to <em>28 Days Later</em>. It is a smart, well-made film that explores themes such as survival, family, morality, and humanity in a dystopian setting. It also features some of the most intense and memorable scenes in horror-film history.</p>
<p>So what are you waiting for? Download <em>28 Weeks Later</em> today and enjoy this amazing film with your friends or family. And don't forget to share your comments and opinions about the film with others by leaving a comment below or on social media.</p>
<h3>Frequently asked questions</h3>
<p>Here are some of the most frequently asked questions about <em>28 Weeks Later</em>:</p>
<ul>
<li><strong>Is 28 Weeks Later a remake or a sequel?</strong>: <em>28 Weeks Later</em> is a sequel to <em>28 Days Later</em>, not a remake. It takes place six months after the events of the first film and follows a different group of characters and a new story.</li>
<li><strong>Do I need to watch 28 Days Later before 28 Weeks Later?</strong>: You do not need to watch <em>28 Days Later</em> before <em>28 Weeks Later</em>, as the films stand alone and have minimal connections. However, it is recommended to watch <em>28 Days Later</em> first, as it will give you more context and background on the rage virus and the world of the films.</li>
<li><strong>Is 28 Weeks Later based on a true story or a book?</strong>: No, <em>28 Weeks Later</em> is not based on a true story or a book. It is an original screenplay written by Juan Carlos Fresnadillo, Rowan Joffé, Enrique López Lavigne, and Jesus Olmo.</li>
<li><strong>Is 28 Weeks Later suitable for children or sensitive viewers?</strong>: No, <em>28 Weeks Later</em> is not suitable for children or sensitive viewers. It is rated R for strong violence, language, and nudity. It contains scenes of graphic violence, gore, mutilation, death, and horror that may be disturbing to some viewers.</li>
</ul>
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_nms_rotated.py
DELETED
@@ -1,159 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from torchvision import ops

from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated


class TestNMSRotated(unittest.TestCase):
    def reference_horizontal_nms(self, boxes, scores, iou_threshold):
        """
        Args:
            box_scores (N, 5): boxes in corner-form and probabilities.
                (Note here 5 == 4 + 1, i.e., 4-dim horizontal box + 1-dim prob)
            iou_threshold: intersection over union threshold.
        Returns:
            picked: a list of indexes of the kept boxes
        """
        picked = []
        _, indexes = scores.sort(descending=True)
        while len(indexes) > 0:
            current = indexes[0]
            picked.append(current.item())
            if len(indexes) == 1:
                break
            current_box = boxes[current, :]
            indexes = indexes[1:]
            rest_boxes = boxes[indexes, :]
            iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1)
            indexes = indexes[iou <= iou_threshold]

        return torch.as_tensor(picked)

    def _create_tensors(self, N):
        boxes = torch.rand(N, 4) * 100
        # Note: the implementation of this function in torchvision is:
        # boxes[:, 2:] += torch.rand(N, 2) * 100
        # but it does not guarantee non-negative widths/heights constraints:
        # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]:
        boxes[:, 2:] += boxes[:, :2]
        scores = torch.rand(N)
        return boxes, scores

    def test_batched_nms_rotated_0_degree_cpu(self):
        # torch.manual_seed(0)
        N = 2000
        num_classes = 50
        boxes, scores = self._create_tensors(N)
        idxs = torch.randint(0, num_classes, (N,))
        rotated_boxes = torch.zeros(N, 5)
        rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
        rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
        rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
        rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
        err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}"
        for iou in [0.2, 0.5, 0.8]:
            backup = boxes.clone()
            keep_ref = batched_nms(boxes, scores, idxs, iou)
            assert torch.allclose(boxes, backup), "boxes modified by batched_nms"
            backup = rotated_boxes.clone()
            keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou)
            assert torch.allclose(
                rotated_boxes, backup
            ), "rotated_boxes modified by batched_nms_rotated"
            assert torch.equal(keep, keep_ref), err_msg.format(iou)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_batched_nms_rotated_0_degree_cuda(self):
        # torch.manual_seed(0)
        N = 2000
        num_classes = 50
        boxes, scores = self._create_tensors(N)
        idxs = torch.randint(0, num_classes, (N,))
        rotated_boxes = torch.zeros(N, 5)
        rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
        rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
        rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
        rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
        err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}"
        for iou in [0.2, 0.5, 0.8]:
            backup = boxes.clone()
            keep_ref = batched_nms(boxes.cuda(), scores.cuda(), idxs, iou)
            assert torch.allclose(boxes, backup), "boxes modified by batched_nms"
            backup = rotated_boxes.clone()
            keep = batched_nms_rotated(rotated_boxes.cuda(), scores.cuda(), idxs, iou)
            assert torch.allclose(
                rotated_boxes, backup
            ), "rotated_boxes modified by batched_nms_rotated"
            assert torch.equal(keep, keep_ref), err_msg.format(iou)

    def test_nms_rotated_0_degree_cpu(self):
        N = 1000
        boxes, scores = self._create_tensors(N)
        rotated_boxes = torch.zeros(N, 5)
        rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
        rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
        rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
        rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
        err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
        for iou in [0.5]:
            keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
            keep = nms_rotated(rotated_boxes, scores, iou)
            assert torch.equal(keep, keep_ref), err_msg.format(iou)

    def test_nms_rotated_90_degrees_cpu(self):
        N = 1000
        boxes, scores = self._create_tensors(N)
        rotated_boxes = torch.zeros(N, 5)
        rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
        rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
        # Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]:
        # widths and heights are intentionally swapped here for 90 degrees case
        # so that the reference horizontal nms could be used
        rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1]
        rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0]

        rotated_boxes[:, 4] = torch.ones(N) * 90
        err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
        for iou in [0.2, 0.5, 0.8]:
            keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
            keep = nms_rotated(rotated_boxes, scores, iou)
            assert torch.equal(keep, keep_ref), err_msg.format(iou)

    def test_nms_rotated_180_degrees_cpu(self):
        N = 1000
        boxes, scores = self._create_tensors(N)
        rotated_boxes = torch.zeros(N, 5)
        rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
        rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
        rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
        rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
        rotated_boxes[:, 4] = torch.ones(N) * 180
        err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
        for iou in [0.2, 0.5, 0.8]:
            keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
            keep = nms_rotated(rotated_boxes, scores, iou)
            assert torch.equal(keep, keep_ref), err_msg.format(iou)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_nms_rotated_0_degree_cuda(self):
        N = 1000
        boxes, scores = self._create_tensors(N)
        rotated_boxes = torch.zeros(N, 5)
        rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
        rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
        rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
        rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
        err_msg = "Rotated NMS incompatible between CPU and CUDA for IoU={}"

        for iou in [0.2, 0.5, 0.8]:
            r_cpu = nms_rotated(rotated_boxes, scores, iou)
            r_cuda = nms_rotated(rotated_boxes.cuda(), scores.cuda(), iou)

            assert torch.equal(r_cpu, r_cuda.cpu()), err_msg.format(iou)


if __name__ == "__main__":
    unittest.main()
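The tests above all build on one identity: an axis-aligned box (x1, y1, x2, y2) equals the rotated box (cx, cy, w, h, angle=0). A minimal sketch of that conversion, assuming detectron2 is installed (the helper name to_rotated is ours, not part of the library):

import torch
from detectron2.layers import nms_rotated

def to_rotated(boxes: torch.Tensor, angle: float = 0.0) -> torch.Tensor:
    # (x1, y1, x2, y2) -> (cx, cy, w, h, angle), the layout nms_rotated expects
    rotated = torch.zeros(boxes.shape[0], 5)
    rotated[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0  # center x
    rotated[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0  # center y
    rotated[:, 2] = boxes[:, 2] - boxes[:, 0]          # width
    rotated[:, 3] = boxes[:, 3] - boxes[:, 1]          # height
    rotated[:, 4] = angle
    return rotated

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]])
scores = torch.tensor([0.9, 0.8])
keep = nms_rotated(to_rotated(boxes), scores, 0.5)  # at angle 0 this matches plain NMS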
spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubBuildCompilerTargets.cmake
DELETED
@@ -1,102 +0,0 @@
#
# This file defines the `cub_build_compiler_targets()` function, which
# creates the following interface targets:
#
# cub.compiler_interface
# - Interface target providing compiler-specific options needed to build
#   Thrust's tests, examples, etc.

function(cub_build_compiler_targets)
  set(cxx_compile_definitions)
  set(cxx_compile_options)

  if ("MSVC" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
    # TODO Enable /Wall
    append_option_if_available("/WX" cxx_compile_options)

    # Disabled loss-of-data conversion warnings.
    # TODO Re-enable.
    append_option_if_available("/wd4244" cxx_compile_options)
    append_option_if_available("/wd4267" cxx_compile_options)

    # Suppress numeric conversion-to-bool warnings.
    # TODO Re-enable.
    append_option_if_available("/wd4800" cxx_compile_options)

    # Disable warning about applying unary operator- to unsigned type.
    append_option_if_available("/wd4146" cxx_compile_options)

    # Some tests require /bigobj to fit everything into their object files:
    append_option_if_available("/bigobj" cxx_compile_options)
  else()
    append_option_if_available("-Werror" cxx_compile_options)
    append_option_if_available("-Wall" cxx_compile_options)
    append_option_if_available("-Wextra" cxx_compile_options)
    append_option_if_available("-Winit-self" cxx_compile_options)
    append_option_if_available("-Woverloaded-virtual" cxx_compile_options)
    append_option_if_available("-Wcast-qual" cxx_compile_options)
    append_option_if_available("-Wno-cast-align" cxx_compile_options)
    append_option_if_available("-Wno-long-long" cxx_compile_options)
    append_option_if_available("-Wno-variadic-macros" cxx_compile_options)
    append_option_if_available("-Wno-unused-function" cxx_compile_options)
    append_option_if_available("-Wno-unused-variable" cxx_compile_options)

    # CUB uses deprecated texture functions (cudaBindTexture, etc). These
    # need to be replaced, but silence the warnings for now.
    append_option_if_available("-Wno-deprecated-declarations" cxx_compile_options)
  endif()

  if ("GNU" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
    if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 4.5)
      # This isn't available until GCC 4.3, and misfires on TMP code until
      # GCC 4.5.
      append_option_if_available("-Wlogical-op" cxx_compile_options)
    endif()

    if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 7.3)
      # GCC 7.3 complains about name mangling changes due to `noexcept`
      # becoming part of the type system; we don't care.
      append_option_if_available("-Wno-noexcept-type" cxx_compile_options)
    endif()
  endif()

  if (("Clang" STREQUAL "${CMAKE_CXX_COMPILER_ID}") OR
      ("XL" STREQUAL "${CMAKE_CXX_COMPILER_ID}"))
    # xlC and Clang warn about unused parameters in uninstantiated templates.
    # This causes xlC to choke on the OMP backend, which is mostly #ifdef'd out
    # (and thus has unused parameters) when you aren't using it.
    append_option_if_available("-Wno-unused-parameters" cxx_compile_options)
  endif()

  if ("Clang" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
    # -Wunneeded-internal-declaration misfires in the unit test framework
    # on older versions of Clang.
    append_option_if_available("-Wno-unneeded-internal-declaration" cxx_compile_options)
  endif()

  add_library(cub.compiler_interface INTERFACE)

  foreach (cxx_option IN LISTS cxx_compile_options)
    target_compile_options(cub.compiler_interface INTERFACE
      $<$<COMPILE_LANGUAGE:CXX>:${cxx_option}>
      # Only use -Xcompiler with NVCC, not Feta.
      #
      # CMake can't split genexs, so this can't be formatted better :(
      # This is:
      # if (using CUDA and CUDA_COMPILER is NVCC) add -Xcompiler=opt:
      $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:-Xcompiler=${cxx_option}>
    )
  endforeach()

  # Add these for both CUDA and CXX targets:
  target_compile_definitions(cub.compiler_interface INTERFACE
    ${cxx_compile_definitions}
  )

  # Promote warnings and display diagnostic numbers for nvcc:
  target_compile_options(cub.compiler_interface INTERFACE
    # If using CUDA w/ NVCC...
    $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:-Xcudafe=--display_error_number>
    $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:-Xcudafe=--promote_warnings>
  )
endfunction()
spaces/CVPR/MonoScene/monoscene/__init__.py
DELETED
File without changes
spaces/CVPR/WALT/mmdet/models/detectors/cascade_rcnn.py
DELETED
@@ -1,46 +0,0 @@
from ..builder import DETECTORS
from .two_stage import TwoStageDetector


@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
    r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
    Detection <https://arxiv.org/abs/1906.09756>`_"""

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(CascadeRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)

    def show_result(self, data, result, **kwargs):
        """Show prediction results of the detector.

        Args:
            data (str or np.ndarray): Image filename or loaded image.
            result (Tensor or tuple): The results to draw over `img`
                bbox_result or (bbox_result, segm_result).

        Returns:
            np.ndarray: The image with bboxes drawn on it.
        """
        if self.with_mask:
            ms_bbox_result, ms_segm_result = result
            if isinstance(ms_bbox_result, dict):
                result = (ms_bbox_result['ensemble'],
                          ms_segm_result['ensemble'])
        else:
            if isinstance(result, dict):
                result = result['ensemble']
        return super(CascadeRCNN, self).show_result(data, result, **kwargs)
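For context, a class registered this way is normally instantiated through mmdet's config system rather than constructed directly. A rough sketch, assuming mmdet's builder API; the config fields below are abbreviated and illustrative, not a complete training config:

from mmdet.models import build_detector

# Skeleton config; a real Cascade R-CNN config also fills in rpn_head,
# roi_head (with its three cascaded bbox heads), train_cfg and test_cfg.
cfg = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3),
        frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'),
    neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048],
              out_channels=256, num_outs=5))
detector = build_detector(cfg)  # looked up in the DETECTORS registry by 'type'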
spaces/CVPR/WALT/mmdet/models/necks/fpg.py
DELETED
@@ -1,398 +0,0 @@
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, caffe2_xavier_init, constant_init, is_norm

from ..builder import NECKS


class Transition(nn.Module):
    """Base class for transition.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x):
        pass


class UpInterpolationConv(Transition):
    """A transition used for up-sampling.

    Up-samples the input by interpolation, then refines the feature with
    a convolution layer.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Up-sampling factor. Default: 2.
        mode (str): Interpolation mode. Default: nearest.
        align_corners (bool): Whether to align corners when interpolating.
            Default: None.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor=2,
                 mode='nearest',
                 align_corners=None,
                 kernel_size=3,
                 **kwargs):
        super().__init__(in_channels, out_channels)
        self.mode = mode
        self.scale_factor = scale_factor
        self.align_corners = align_corners
        self.conv = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, x):
        x = F.interpolate(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)
        x = self.conv(x)
        return x


class LastConv(Transition):
    """A transition used for refining the output of the last stage.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        num_inputs (int): Number of inputs of the FPN features.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_inputs,
                 kernel_size=3,
                 **kwargs):
        super().__init__(in_channels, out_channels)
        self.num_inputs = num_inputs
        self.conv_out = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, inputs):
        assert len(inputs) == self.num_inputs
        return self.conv_out(inputs[-1])


@NECKS.register_module()
class FPG(nn.Module):
    """FPG.

    Implementation of `Feature Pyramid Grids (FPG)
    <https://arxiv.org/abs/2004.03580>`_.
    This implementation only gives the basic structure stated in the paper,
    but users can implement different types of transitions to fully explore
    the potential power of the structure of FPG.

    Args:
        in_channels (list[int]): Number of input channels (feature maps of
            all levels should have the same channels).
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        stack_times (int): The number of times the pyramid architecture will
            be stacked.
        paths (list[str]): Specify the path order of each stack level.
            Each element in the list should be either 'bu' (bottom-up) or
            'td' (top-down).
        inter_channels (int): Number of inter channels.
        same_up_trans (dict): Transition that goes down at the same stage.
        same_down_trans (dict): Transition that goes up at the same stage.
        across_lateral_trans (dict): Across-pathway same-stage transition.
        across_down_trans (dict): Across-pathway bottom-up connection.
        across_up_trans (dict): Across-pathway top-down connection.
        across_skip_trans (dict): Across-pathway skip connection.
        output_trans (dict): Transition that transforms the output of the
            last stage.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): It decides whether to add conv
            layers on top of the original feature maps. Default to False.
            If True, its actual mode is specified by `extra_convs_on_inputs`.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
    """

    transition_types = {
        'conv': ConvModule,
        'interpolation_conv': UpInterpolationConv,
        'last_conv': LastConv,
    }

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 paths,
                 inter_channels=None,
                 same_down_trans=None,
                 same_up_trans=dict(
                     type='conv', kernel_size=3, stride=2, padding=1),
                 across_lateral_trans=dict(type='conv', kernel_size=1),
                 across_down_trans=dict(type='conv', kernel_size=3),
                 across_up_trans=None,
                 across_skip_trans=dict(type='identity'),
                 output_trans=dict(type='last_conv', kernel_size=3),
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 norm_cfg=None,
                 skip_inds=None):
        super(FPG, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        if inter_channels is None:
            self.inter_channels = [out_channels for _ in range(num_outs)]
        elif isinstance(inter_channels, int):
            self.inter_channels = [inter_channels for _ in range(num_outs)]
        else:
            assert isinstance(inter_channels, list)
            assert len(inter_channels) == num_outs
            self.inter_channels = inter_channels
        self.stack_times = stack_times
        self.paths = paths
        assert isinstance(paths, list) and len(paths) == stack_times
        for d in paths:
            assert d in ('bu', 'td')

        self.same_down_trans = same_down_trans
        self.same_up_trans = same_up_trans
        self.across_lateral_trans = across_lateral_trans
        self.across_down_trans = across_down_trans
        self.across_up_trans = across_up_trans
        self.output_trans = output_trans
        self.across_skip_trans = across_skip_trans

        self.with_bias = norm_cfg is None
        # skip inds must be specified if across skip trans is not None
        if self.across_skip_trans is not None:
            assert skip_inds is not None
        self.skip_inds = skip_inds
        assert len(self.skip_inds[0]) <= self.stack_times

        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < inputs, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs

        # build lateral 1x1 convs to reduce channels
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = nn.Conv2d(self.in_channels[i],
                               self.inter_channels[i - self.start_level], 1)
            self.lateral_convs.append(l_conv)

        extra_levels = num_outs - self.backbone_end_level + self.start_level
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            if self.add_extra_convs:
                fpn_idx = self.backbone_end_level - self.start_level + i
                extra_conv = nn.Conv2d(
                    self.inter_channels[fpn_idx - 1],
                    self.inter_channels[fpn_idx],
                    3,
                    stride=2,
                    padding=1)
                self.extra_downsamples.append(extra_conv)
            else:
                self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))

        self.fpn_transitions = nn.ModuleList()  # stack times
        for s in range(self.stack_times):
            stage_trans = nn.ModuleList()  # num of feature levels
            for i in range(self.num_outs):
                # same, across_lateral, across_down, across_up
                trans = nn.ModuleDict()
                if s in self.skip_inds[i]:
                    stage_trans.append(trans)
                    continue
                # build same-stage down trans (used in bottom-up paths)
                if i == 0 or self.same_up_trans is None:
                    same_up_trans = None
                else:
                    same_up_trans = self.build_trans(
                        self.same_up_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['same_up'] = same_up_trans
                # build same-stage up trans (used in top-down paths)
                if i == self.num_outs - 1 or self.same_down_trans is None:
                    same_down_trans = None
                else:
                    same_down_trans = self.build_trans(
                        self.same_down_trans, self.inter_channels[i + 1],
                        self.inter_channels[i])
                trans['same_down'] = same_down_trans
                # build across lateral trans
                across_lateral_trans = self.build_trans(
                    self.across_lateral_trans, self.inter_channels[i],
                    self.inter_channels[i])
                trans['across_lateral'] = across_lateral_trans
                # build across down trans
                if i == self.num_outs - 1 or self.across_down_trans is None:
                    across_down_trans = None
                else:
                    across_down_trans = self.build_trans(
                        self.across_down_trans, self.inter_channels[i + 1],
                        self.inter_channels[i])
                trans['across_down'] = across_down_trans
                # build across up trans
                if i == 0 or self.across_up_trans is None:
                    across_up_trans = None
                else:
                    across_up_trans = self.build_trans(
                        self.across_up_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['across_up'] = across_up_trans
                # build across skip trans
                if self.across_skip_trans is None:
                    across_skip_trans = None
                else:
                    across_skip_trans = self.build_trans(
                        self.across_skip_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['across_skip'] = across_skip_trans
                stage_trans.append(trans)
            self.fpn_transitions.append(stage_trans)

        self.output_transition = nn.ModuleList()  # output levels
        for i in range(self.num_outs):
            trans = self.build_trans(
                self.output_trans,
                self.inter_channels[i],
                self.out_channels,
                num_inputs=self.stack_times + 1)
            self.output_transition.append(trans)

        self.relu = nn.ReLU(inplace=True)

    def build_trans(self, cfg, in_channels, out_channels, **extra_args):
        cfg_ = cfg.copy()
        trans_type = cfg_.pop('type')
        trans_cls = self.transition_types[trans_type]
        return trans_cls(in_channels, out_channels, **cfg_, **extra_args)

    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                caffe2_xavier_init(m)
            elif is_norm(m):
                constant_init(m, 1.0)

    def fuse(self, fuse_dict):
        out = None
        for item in fuse_dict.values():
            if item is not None:
                if out is None:
                    out = item
                else:
                    out = out + item
        return out

    def forward(self, inputs):
        assert len(inputs) == len(self.in_channels)

        # build all levels from original feature maps
        feats = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[-1]))

        outs = [feats]

        for i in range(self.stack_times):
            current_outs = outs[-1]
            next_outs = []
            direction = self.paths[i]
            for j in range(self.num_outs):
                if i in self.skip_inds[j]:
                    next_outs.append(outs[-1][j])
                    continue
                # feature level
                if direction == 'td':
                    lvl = self.num_outs - j - 1
                else:
                    lvl = j
                # get transitions
                if direction == 'td':
                    same_trans = self.fpn_transitions[i][lvl]['same_down']
                else:
                    same_trans = self.fpn_transitions[i][lvl]['same_up']
                across_lateral_trans = self.fpn_transitions[i][lvl][
                    'across_lateral']
                across_down_trans = self.fpn_transitions[i][lvl]['across_down']
                across_up_trans = self.fpn_transitions[i][lvl]['across_up']
                across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
                # init output
                to_fuse = dict(
                    same=None, lateral=None, across_up=None, across_down=None)
                # same downsample/upsample
                if same_trans is not None:
                    to_fuse['same'] = same_trans(next_outs[-1])
                # across lateral
                if across_lateral_trans is not None:
                    to_fuse['lateral'] = across_lateral_trans(
                        current_outs[lvl])
                # across downsample
                if lvl > 0 and across_up_trans is not None:
                    to_fuse['across_up'] = across_up_trans(
                        current_outs[lvl - 1])
                # across upsample
                if lvl < self.num_outs - 1 and across_down_trans is not None:
                    to_fuse['across_down'] = across_down_trans(
                        current_outs[lvl + 1])
                if across_skip_trans is not None:
                    to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
                x = self.fuse(to_fuse)
                next_outs.append(x)

            if direction == 'td':
                outs.append(next_outs[::-1])
            else:
                outs.append(next_outs)

        # output trans
        final_outs = []
        for i in range(self.num_outs):
            lvl_out_list = []
            for s in range(len(outs)):
                lvl_out_list.append(outs[s][i])
            lvl_out = self.output_transition[i](lvl_out_list)
            final_outs.append(lvl_out)

        return final_outs
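A quick smoke test of the neck above, as a minimal sketch: all shapes and hyper-parameters here are illustrative, and the official configs use different transitions and far more stacking. The across-down connections are switched to 'interpolation_conv' so that spatial sizes match when fusing, and across_skip_trans is disabled for simplicity.

import torch

fpg = FPG(
    in_channels=[256, 512, 1024],
    out_channels=256,
    num_outs=5,
    stack_times=2,
    paths=['bu', 'td'],
    # upsample across-down connections so spatial sizes match when fusing
    across_down_trans=dict(type='interpolation_conv', mode='nearest',
                           kernel_size=3, scale_factor=2),
    across_skip_trans=None,
    skip_inds=[[] for _ in range(5)])
fpg.init_weights()

# three backbone levels for a 256x256 image at strides 8, 16 and 32
feats = [torch.rand(2, c, s, s) for c, s in zip([256, 512, 1024], [32, 16, 8])]
outs = fpg(feats)
print([tuple(o.shape) for o in outs])  # five maps, each with 256 channels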
spaces/Celestinian/Prompt-Generator/app.py
DELETED
@@ -1,34 +0,0 @@
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("Celestinian/PromptGPT")
model = AutoModelForCausalLM.from_pretrained("Celestinian/PromptGPT").to(device)

def generate_text(prompt, max_length, do_sample, temperature, top_k, top_p):
    # Prompts are comma-separated tag lists; make sure the seed text ends
    # with a comma so the model continues the list.
    formatted_prompt = "\n" + prompt
    if ',' not in prompt:
        formatted_prompt += ','
    inputs = tokenizer(formatted_prompt, return_tensors='pt')
    inputs = {key: value.to(device) for key, value in inputs.items()}
    out = model.generate(**inputs, max_length=max_length, do_sample=do_sample,
                         temperature=temperature, no_repeat_ngram_size=3,
                         top_k=top_k, top_p=top_p)
    output = tokenizer.decode(out[0])
    clean_output = output.replace('\\n', '\n')  # unescape literal "\n" sequences
    print(clean_output)
    return clean_output

input_text = gr.inputs.Textbox(lines=5, label="Input Text")
max_length = gr.inputs.Slider(minimum=10, maximum=100, default=30, label="Max Length")
do_sample = gr.inputs.Checkbox(default=True, label="Do Sample")
temperature = gr.inputs.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.4, label="Temperature")
top_k = gr.inputs.Slider(minimum=1, maximum=100, step=1, default=50, label="Top K")
top_p = gr.inputs.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.2, label="Top P")

output_text = gr.outputs.Textbox(label="Generated Text")

gr.Interface(generate_text, inputs=[input_text, max_length, do_sample, temperature, top_k, top_p],
             outputs=output_text).launch()
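The handler above can also be exercised without launching the interface; a quick local check (argument values are arbitrary) might look like:

print(generate_text("masterpiece, portrait of a knight", max_length=40,
                    do_sample=True, temperature=0.4, top_k=50, top_p=0.9))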
spaces/CguCsie/README/README.md
DELETED
@@ -1,11 +0,0 @@
---
title: README_ryExp001
emoji: 🚀
colorFrom: indigo
colorTo: yellow
sdk: static
pinned: false
license: openrail
---

Edit this `README.md` markdown file to author your organization card 🔥
spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/__init__.py
DELETED
@@ -1,19 +0,0 @@
from numpy import zeros, int32, float32
from torch import from_numpy

from .core import maximum_path_jit

def maximum_path(neg_cent, mask):
    """ numba optimized version.
    neg_cent: [b, t_t, t_s]
    mask: [b, t_t, t_s]
    """
    device = neg_cent.device
    dtype = neg_cent.dtype
    neg_cent = neg_cent.data.cpu().numpy().astype(float32)
    path = zeros(neg_cent.shape, dtype=int32)

    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
    maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
    return from_numpy(path).to(device=device, dtype=dtype)
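For reference, a minimal call of the wrapper above (shapes illustrative; t_t is the number of decoder frames, t_s the number of text tokens, and the numba core from .core must be importable):

import torch

b, t_t, t_s = 2, 100, 25
neg_cent = torch.randn(b, t_t, t_s)   # alignment scores per frame/token pair
mask = torch.ones(b, t_t, t_s)        # valid region for each batch element
path = maximum_path(neg_cent, mask)   # hard monotonic alignment, 0/1, same shape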
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/common/__init__.py
DELETED
File without changes
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/MacRoman.py
DELETED
@@ -1,258 +0,0 @@
MacRoman = [
    "NUL", "Eth", "eth", "Lslash", "lslash", "Scaron", "scaron", "Yacute",
    "yacute", "HT", "LF", "Thorn", "thorn", "CR", "Zcaron", "zcaron",
    "DLE", "DC1", "DC2", "DC3", "DC4", "onehalf", "onequarter", "onesuperior",
    "threequarters", "threesuperior", "twosuperior", "brokenbar", "minus", "multiply", "RS", "US",
    "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quotesingle",
    "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash",
    "zero", "one", "two", "three", "four", "five", "six", "seven",
    "eight", "nine", "colon", "semicolon", "less", "equal", "greater", "question",
    "at", "A", "B", "C", "D", "E", "F", "G",
    "H", "I", "J", "K", "L", "M", "N", "O",
    "P", "Q", "R", "S", "T", "U", "V", "W",
    "X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
    "grave", "a", "b", "c", "d", "e", "f", "g",
    "h", "i", "j", "k", "l", "m", "n", "o",
    "p", "q", "r", "s", "t", "u", "v", "w",
    "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", "DEL",
    "Adieresis", "Aring", "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis", "aacute",
    "agrave", "acircumflex", "adieresis", "atilde", "aring", "ccedilla", "eacute", "egrave",
    "ecircumflex", "edieresis", "iacute", "igrave", "icircumflex", "idieresis", "ntilde", "oacute",
    "ograve", "ocircumflex", "odieresis", "otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
    "dagger", "degree", "cent", "sterling", "section", "bullet", "paragraph", "germandbls",
    "registered", "copyright", "trademark", "acute", "dieresis", "notequal", "AE", "Oslash",
    "infinity", "plusminus", "lessequal", "greaterequal", "yen", "mu", "partialdiff", "summation",
    "product", "pi", "integral", "ordfeminine", "ordmasculine", "Omega", "ae", "oslash",
    "questiondown", "exclamdown", "logicalnot", "radical", "florin", "approxequal", "Delta", "guillemotleft",
    "guillemotright", "ellipsis", "nbspace", "Agrave", "Atilde", "Otilde", "OE", "oe",
    "endash", "emdash", "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide", "lozenge",
    "ydieresis", "Ydieresis", "fraction", "currency", "guilsinglleft", "guilsinglright", "fi", "fl",
    "daggerdbl", "periodcentered", "quotesinglbase", "quotedblbase", "perthousand", "Acircumflex", "Ecircumflex", "Aacute",
    "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
    "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave", "dotlessi", "circumflex", "tilde",
    "macron", "breve", "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek", "caron",
]
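A quick sanity check of the table above (the list index is the MacRoman byte value, so the ASCII range maps to the expected glyph names):

assert len(MacRoman) == 256
assert MacRoman[0x41] == "A"        # ASCII 'A' keeps its name
assert MacRoman[0x20] == "space"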