Commit 0bb411c
Parent(s): f7454e6
Update parquet files (step 32 of 121)
This view is limited to 50 files because it contains too many changes.
- spaces/0xSynapse/Image_captioner/app.py +0 -62
- spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/__init__.py +0 -46
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Golmaal Again 1080p Hd Hindi Full Fix Movie.md +0 -15
- spaces/1gistliPinn/ChatGPT4/Examples/Calculus By Howard Anton 8th Edition Free !FULL! Download.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Design My Home Makeover Games APK - A Fun and Relaxing Word Game with Home Decoration.md +0 -96
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bloons TD 6 on PC and Experience the Most Epic Tower Defense Game.md +0 -152
- spaces/1phancelerku/anime-remove-background/Baixe o Livro de Regras do RPG Ordem Paranormal criado por Cellbit e seus amigos.md +0 -124
- spaces/2023Liu2023/bingo/src/app/loading.css +0 -68
- spaces/AB-TW/team-ai/agents/tools/smart_domain/common.py +0 -18
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/audio/stft.py +0 -180
- spaces/AP123/text-to-3D/README.md +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspective/Perspective.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/ScrollBar.d.ts +0 -67
- spaces/Algoworks/Image_Face_Upscale_Restoration-GFPGAN_pub/app.py +0 -142
- spaces/Ameaou/academic-chatgpt3.1/crazy_functions/批量总结PDF文档pdfminer.py +0 -160
- spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/callback/__init__.py +0 -16
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README_sdxl.md +0 -189
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py +0 -1196
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/get_modified_files.py +0 -34
- spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py +0 -5
- spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py +0 -12
- spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolo_head.py +0 -577
- spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py +0 -6
- spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py +0 -39
- spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py +0 -2
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/inference.py +0 -136
- spaces/ArcAhmedEssam/CLIP-Interrogator-2/share_btn.py +0 -70
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/wheel.py +0 -740
- spaces/Benson/text-generation/Examples/Avarampoo Descarga De Pelculas Pelculas.md +0 -71
- spaces/Benson/text-generation/Examples/Betty Noir Fuente Descargar.md +0 -81
- spaces/Benson/text-generation/Examples/Cubic 234 Juegos De Jugadores Mod Apk.md +0 -86
- spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/ast.py +0 -90
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/file_proxy.py +0 -57
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/_itertools.py +0 -35
- spaces/Binguii/Venus_Proxy/Dockerfile +0 -21
- spaces/CVPR/Dual-Key_Backdoor_Attacks/README.md +0 -13
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/build_loader.py +0 -105
- spaces/CVPR/WALT/mmdet/models/backbones/hourglass.py +0 -198
- spaces/CVPR/WALT/walt/apis/__init__.py +0 -6
- spaces/CVPR/WALT/walt/datasets/pipelines/test_time_aug.py +0 -119
- spaces/CVPR/v-doc_abstractive_mac/extract_feature.py +0 -51
- spaces/Cat125/text-generator-v3/README.md +0 -14
- spaces/CognitiveLabs/Research-Assistant/agent/llm_utils.py +0 -39
- spaces/Cong723/gpt-academic-public/crazy_functions/下载arxiv论文翻译摘要.py +0 -194
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/streams/stapled.py +0 -140
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_auth.py +0 -347
- spaces/Dantra1/CeliaSensei/attentions.py +0 -300
- spaces/Dauzy/whisper-webui/src/whisper/fasterWhisperContainer.py +0 -207
- spaces/Detomo/ai-comic-generation/src/app/ocr.tsx +0 -3
spaces/0xSynapse/Image_captioner/app.py
DELETED
@@ -1,62 +0,0 @@
#imported all required libraries
import streamlit as st
import torch
import requests
from PIL import Image
from io import BytesIO
from transformers import ViTFeatureExtractor, AutoTokenizer, VisionEncoderDecoderModel


#used a pretrained model hosted on huggingface
loc = "ydshieh/vit-gpt2-coco-en"

feature_extractor = ViTFeatureExtractor.from_pretrained(loc)
tokenizer = AutoTokenizer.from_pretrained(loc)
model = VisionEncoderDecoderModel.from_pretrained(loc)
model.eval()

#defined a function for prediction

def predict(image):
    pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values

    with torch.no_grad():
        output_ids = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True).sequences

    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    preds = [pred.strip() for pred in preds]

    return preds

#defined a function for Streamlit App
def app():
    st.title("ImaginateAI")
    st.write("ViT and GPT2 are used to generate Image Caption for the uploaded image. COCO Dataset was used for training. This image captioning model might have some biases that I couldn’t figure during testing")
    st.write("Upload an image or paste a URL to get predicted captions.")

    upload_option = st.selectbox("Choose an option:", ("Upload Image", "Paste URL"))

    if upload_option == "Upload Image":
        uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg"])

        if uploaded_file is not None:
            image = Image.open(uploaded_file)
            preds = predict(image)
            st.image(image, caption="Uploaded Image", use_column_width=True)
            st.write("Predicted Caption:", preds)

    elif upload_option == "Paste URL":
        image_url = st.text_input("Enter Image URL")
        if st.button("Submit") and image_url:
            try:
                response = requests.get(image_url, stream=True)
                image = Image.open(BytesIO(response.content))
                preds = predict(image)
                st.image(image, caption="Image from URL", use_column_width=True)
                st.write("Predicted Caption:", preds)
            except:
                st.write("Error: Invalid URL or unable to fetch image.")

if __name__ == "__main__":
    app()
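For context (not part of the original commit), a minimal sketch of exercising the predict helper from the deleted file above outside of Streamlit. It assumes the file is saved as app.py next to a local example.jpg, and that torch, transformers, Pillow and streamlit are installed; importing app downloads the ydshieh/vit-gpt2-coco-en checkpoint.

    # hypothetical smoke test for the deleted app.py above
    from PIL import Image

    import app  # module-level code in app.py loads the feature extractor, tokenizer and model

    image = Image.open("example.jpg").convert("RGB")
    captions = app.predict(image)  # returns a list of decoded caption strings
    print(captions[0])

On Hugging Face Spaces the file itself would have been served with streamlit run app.py.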
spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/__init__.py
DELETED
@@ -1,46 +0,0 @@
import requests
import json
import hashlib
import random
import string
from fake_useragent import UserAgent

class ChatCompletion:
    @classmethod
    def md5(self, text):
        return hashlib.md5(text.encode()).hexdigest()[::-1]

    @classmethod
    def get_api_key(self, user_agent):
        part1 = str(random.randint(0, 10**11))
        part2 = self.md5(user_agent+self.md5(user_agent+self.md5(user_agent+part1+"x")))
        return f"tryit-{part1}-{part2}"

    @classmethod
    def create(self, messages):
        user_agent = UserAgent().random
        api_key = self.get_api_key(user_agent)
        headers = {
            "api-key": api_key,
            "user-agent": user_agent
        }
        files = {
            "chat_style": (None, "chat"),
            "chatHistory": (None, json.dumps(messages))
        }

        r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)

        for chunk in r.iter_content(chunk_size=None):
            r.raise_for_status()
            yield chunk.decode()

class Completion:
    @classmethod
    def create(self, prompt):
        return ChatCompletion.create([
            {
                "role": "user",
                "content": prompt
            }
        ])
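For context (not part of the commit), a minimal sketch of how a provider module like this was typically consumed. The import path is an assumption based on the package layout above (g4f/.v1/gpt4free/deepai) and requires fake_useragent to be installed; Completion.create returns a generator that yields response chunks as they stream in.

    from gpt4free import deepai  # hypothetical import; assumes the .v1 package root is on sys.path

    # stream the DeepAI chat response chunk by chunk
    for chunk in deepai.Completion.create("Hello, how are you?"):
        print(chunk, end="", flush=True)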
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Golmaal Again 1080p Hd Hindi Full Fix Movie.md
DELETED
@@ -1,15 +0,0 @@
<h1>Golmaal Again: A Hilarious and Spooky Comedy</h1>
<p>Golmaal Again is a 2017 Hindi comedy film directed by Rohit Shetty and starring Ajay Devgn, Parineeti Chopra, Tabu, Arshad Warsi, Tusshar Kapoor, Shreyas Talpade, Kunal Khemu, Prakash Raj and Neil Nitin Mukesh. It is the fourth installment of the Golmaal film series and a sequel to Golmaal 3 (2010).</p>
<p>The film follows the adventures of Gopal, Madhav, Lucky, Laxman 1 and Laxman 2, five friends who grew up in an orphanage and reunite after many years. They discover that their childhood friend Khushi, who they thought was dead, is actually alive and has some supernatural powers. They also encounter a ghost named Anna Mathew, who reveals some shocking secrets about their past.</p>
<h2>Golmaal Again 1080p hd hindi full movie</h2><br /><p><b><b>DOWNLOAD</b> ⚹ <a href="https://byltly.com/2uKwWm">https://byltly.com/2uKwWm</a></b></p><br /><br />
<p>Golmaal Again is a blend of comedy, horror and action that will keep you entertained throughout. The film has many hilarious scenes, such as the one where Gopal gets scared by a snake or the one where Laxman 2 mimics a lion. The film also has some emotional moments, such as the one where Khushi reunites with her long-lost father or the one where Gopal confesses his love for Khushi.</p>
<p>Golmaal Again is a blockbuster hit that grossed over ₹300 crore worldwide and became one of the highest-grossing Indian films of all time. The film received positive reviews from critics and audiences alike, who praised its humor, performances, music and direction. The film also won several awards, including the Filmfare Award for Best Actor in a Comic Role for Ajay Devgn.</p>
<p>If you are looking for a fun-filled and spooky movie to watch with your family or friends, Golmaal Again is the perfect choice for you. You can watch Golmaal Again in full HD quality on Disney+ Hotstar or Bilibili. Don't miss this laughter riot that will make you go "Golmaal Golmaal"!</p>
<p>Golmaal Again is the fourth film in the Golmaal series, which started with Golmaal: Fun Unlimited (2006), followed by Golmaal Returns (2008) and Golmaal 3 (2010). The series is known for its slapstick comedy, quirky characters and catchy songs. The films are loosely inspired by the Hollywood comedy franchise Police Academy.</p>
<p>The film features a star-studded cast of actors who have worked together in previous Golmaal films. Ajay Devgn plays Gopal, the leader of the gang who is afraid of ghosts and loves Khushi. Parineeti Chopra plays Khushi, the bubbly and innocent girl who has a special connection with Gopal. Tabu plays Anna Mathew, the librarian and ghost whisperer who helps the gang solve the mystery. Arshad Warsi plays Madhav, the prankster who often clashes with Gopal. Tusshar Kapoor plays Lucky, the mute and funny member of the gang. Shreyas Talpade plays Laxman 1, the stammering and loyal friend of Gopal. Kunal Khemu plays Laxman 2, the smart and witty brother of Laxman 1. Prakash Raj plays Sheru Bhai, the don of Ooty who has a grudge against the gang. Neil Nitin Mukesh plays Nikhil, the son of Sheru Bhai who falls in love with Khushi.</p>
<p>The film also has some special appearances by actors such as Sanjay Mishra, Johnny Lever, Mukesh Tiwari, Vrajesh Hirjee, Murali Sharma and Sachin Khedekar. The film has a cameo by Nana Patekar, who lends his voice to the ghost of Jamnadas, the owner of the orphanage where the gang grew up. The film also features a song by Ranveer Singh, who dances with the gang in the end credits.</p>
<p></p> 81aa517590<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/Calculus By Howard Anton 8th Edition Free !FULL! Download.md
DELETED
@@ -1,6 +0,0 @@
<h2>Calculus By Howard Anton 8th Edition Free Download</h2><br /><p><b><b>Download</b> ••• <a href="https://imgfil.com/2uy0oS">https://imgfil.com/2uy0oS</a></b></p><br /><br />
<br />
Howard Anton's Handbook of Exercise Calculus, 8th edition. Jun 05, 2015 • 75 Likes • 28,712 views. (Photo: Depositphotos) While we try to keep our promises, we must not forget that we still have to keep our promises. We must remember that not all of our promises are true and that we must be careful when we decide what we promise. This is especially true in the financial world. Our financial promises are not perfect, and they don't have to be. The financial promises don't change. 8a78ff9644<br />
<br />
<br />
<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Design My Home Makeover Games APK - A Fun and Relaxing Word Game with Home Decoration.md
DELETED
@@ -1,96 +0,0 @@
<h1>Design My Home: Makeover Games APK - A Fun and Creative Casual Game</h1>
<p>Do you love home design and word games? If yes, then you will love Design My Home: Makeover Games APK, a casual game that combines both genres in a fun and creative way. In this game, you can design your dream home, solve word puzzles, and compete with other players around the world. Here is everything you need to know about this game, including how to download and install it, what are its features, and what are some tips and tricks for playing it.</p>
<h2>What is Design My Home: Makeover Games?</h2>
<p>Design My Home: Makeover Games is a casual game developed by Holy Cow Studio. The APK has been available since May 2020. Design My Home: Makeover Games has been downloaded 1+ million times. It's currently not in the top ranks on Google Play. It's rated 4.59 out of 5 stars, based on 22,537 ratings. The game has three main aspects:</p>
<h2>design my home makeover games apk</h2><br /><p><b><b>Download</b> ➡ <a href="https://urlin.us/2uT2qm">https://urlin.us/2uT2qm</a></b></p><br /><br />
<h3>A casual game that lets you design your dream home</h3>
<p>In this game, you can unleash your inner designer and create beautiful rooms according to your taste and style. You can choose from hundreds of levels and rooms, such as living rooms, bedrooms, kitchens, bathrooms, gardens, and more. You can also customize every detail, such as the walls, floors, furniture, decor, lighting, plants, and accessories. You can mix and match different styles, colors, patterns, and textures to create your own unique designs.</p>
<h3>A word game that challenges your vocabulary and creativity</h3>
<p>To design each room, you need to solve word puzzles that are related to the theme of the room. For example, if you are designing a kitchen, you need to find words that are associated with cooking, food, utensils, appliances, etc. The word puzzles are in the form of crosswords or word searches. You need to swipe the letters on the screen to form words. You can use hints or shuffle the letters if you are stuck. The more words you find, the more coins you earn.</p>
<h3>A social game that lets you share your designs and compete with others</h3>
<p>You can also share your designs with other players and see their ratings and comments. You can also rate and comment on other players' designs and get inspired by their ideas. You can also join clubs and chat with other designers. You can also participate in tournaments and events where you can compete with other players for prizes and glory.</p>
<h2>How to download and install Design My Home: Makeover Games APK?</h2>
<p>If you want to play Design My Home: Makeover Games APK on your Android device, you need to follow these steps:</p>
<h3>Download the APK file from a trusted source</h3>
<p>You can download the APK file from [AppBrain](^1^), a reliable website that offers free APK downloads for Android apps. You can also scan the QR code on the website to download the file directly to your device. The APK file size is 99 MB and the latest version is 1.2.9.</p>
<h3>Enable unknown sources on your device settings</h3>
<p>Before you can install the APK file, you need to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device. Tap OK to proceed.</p>
<h3>Install the APK file and enjoy the game</h3>
<p>Once you have downloaded and enabled unknown sources, you can install the APK file by tapping on it. You may see a prompt that asks you to confirm the installation. Tap Install and wait for the process to finish. After the installation is complete, you can open the game and start designing your home.</p>
<h2>What are the features of Design My Home: Makeover Games APK?</h2>
<p>Design My Home: Makeover Games APK has many features that make it a fun and creative casual game. Here are some of them:</p>
<p>design my home makeover word life apk<br />
design my home makeover games offline apk<br />
design my home makeover games mod apk<br />
design my home makeover games free download apk<br />
design my home makeover games for android apk<br />
design my home makeover games latest version apk<br />
design my home makeover games hack apk<br />
design my home makeover games unlimited money apk<br />
design my home makeover games 3d apk<br />
design my home makeover games online apk<br />
design my home makeover games with friends apk<br />
design my home makeover games no ads apk<br />
design my home makeover games premium apk<br />
design my home makeover games pro apk<br />
design my home makeover games full apk<br />
design my home makeover games fun apk<br />
design my home makeover games best apk<br />
design my home makeover games new apk<br />
design my home makeover games update apk<br />
design my home makeover games puzzle apk<br />
design my home makeover games simulation apk<br />
design my home makeover games adventure apk<br />
design my home makeover games casual apk<br />
design my home makeover games creative apk<br />
design my home makeover games realistic apk<br />
design my home makeover games easy apk<br />
design my home makeover games challenging apk<br />
design my home makeover games relaxing apk<br />
design my home makeover games addictive apk<br />
design my home makeover games educational apk<br />
design my home makeover games family apk<br />
design my home makeover games kids apk<br />
design my home makeover games adults apk<br />
design my home makeover games girls apk<br />
design my home makeover games boys apk<br />
design my home makeover games cute apk<br />
design my home makeover games beautiful apk<br />
design my home makeover games awesome apk<br />
design my home makeover games amazing apk<br />
design my home makeover games cool apk</p>
<h3>Hundreds of levels and rooms to design</h3>
<p>The game has hundreds of levels and rooms that you can design according to your preferences. You can start with simple rooms like bedrooms and living rooms, and progress to more complex ones like kitchens and gardens. You can also unlock special rooms like studios, spas, cinemas, and more. Each room has a different theme and requirement that you need to follow. For example, some rooms may require you to use a certain style or color scheme, while others may give you more freedom to express yourself.</p>
<h3>Thousands of furniture and decor items to choose from</h3>
<p>The game has thousands of furniture and decor items that you can use to decorate your rooms. You can choose from different categories, such as sofas, beds, tables, chairs, lamps, rugs, paintings, plants, and more. You can also filter the items by style, color, price, or rating. You can also preview the items before you buy them with your coins. You can also rotate, resize, or move the items to fit your design.</p>
<h3>Daily rewards and bonuses to boost your progress</h3>
<p>The game also gives you daily rewards and bonuses that can help you progress faster in the game. You can get free coins, hints, shuffles, stars, or items every day by logging in, watching ads, or completing tasks. You can also get extra rewards by spinning the wheel of fortune or opening the mystery box. You can use these rewards to buy more items, solve more puzzles, or unlock more rooms.</p>
<h3>Offline mode and cloud save support</h3>
<p>The game also supports offline mode and cloud save features. This means that you can play the game without an internet connection and your progress will be saved automatically. You can also sync your progress across different devices by logging in with your Facebook account. This way, you can enjoy the game anytime and anywhere.</p>
<h2>What are the tips and tricks for playing Design My Home: Makeover Games APK?</h2>
<p>If you want to master Design My Home: Makeover Games APK, here are some tips and tricks that you can follow:</p>
<h3>Use hints and shuffle when you are stuck on a word puzzle</h3>
<p>Sometimes, you may find it hard to solve a word puzzle because you don't know the word or you can't see it on the screen. In that case, you can use hints or shuffle to help you out. Hints will reveal one letter of the word for you, while shuffle will rearrange the letters on the screen. You can get hints or shuffle by spending coins or watching ads.</p>
<h3>Follow the design themes and requirements for each room</h3>
<p>Each room in the game has a specific theme and requirement that you need to follow in order to complete it. For example, some rooms may require you to use a certain style or color scheme, while others may give you more freedom to express yourself. You can see the theme and requirement at the top of the screen when you enter a room. You can also see how many stars you need to earn in order to finish the room.</p>
<h3>Collect stars and coins to unlock new items and rooms</h3>
<p>Stars and coins are the main currencies in the game that you need to collect in order to unlock new items and rooms. You can earn stars by completing word puzzles and designing rooms according to their themes and requirements. You can earn coins by finding words, watching ads, spinning the wheel of fortune, opening the mystery box, or completing tasks. You can also buy coins with real money if you want.</p>
<h3>Watch ads and complete tasks to earn extra rewards</h3>
<p>Another way to earn more stars and coins in the game is to watch ads and complete tasks. You can watch ads to get free hints, shuffles, coins, or items. You can also complete tasks that are given to you by the game or by other players. These tasks may involve designing a specific room, finding a certain word, or rating other players' designs. You can get rewards such as coins, stars, items, or badges for completing these tasks.</p>
<h2>Conclusion</h2>
<p>Design My Home: Makeover Games APK is a fun and creative casual game that lets you design your dream home, solve word puzzles, and compete with other players. You can download and install the APK file from a trusted source and enjoy the game on your Android device. You can also explore the features of the game, such as hundreds of levels and rooms, thousands of furniture and decor items, daily rewards and bonuses, offline mode and cloud save support, and more. You can also follow some tips and tricks to master the game, such as using hints and shuffle, following the design themes and requirements, collecting stars and coins, watching ads and completing tasks, and more. If you love home design and word games, you should give Design My Home: Makeover Games APK a try.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about Design My Home: Makeover Games APK:</p>
<h3>Q: Is Design My Home: Makeover Games APK safe to download and install?</h3>
<p>A: Yes, Design My Home: Makeover Games APK is safe to download and install as long as you get it from a trusted source like AppBrain. You should also enable unknown sources on your device settings before installing the APK file.</p>
<h3>Q: How can I update Design My Home: Makeover Games APK?</h3>
<p>A: You can update Design My Home: Makeover Games APK by downloading the latest version of the APK file from AppBrain or by checking for updates within the game. You should always update the game to enjoy the latest features and bug fixes.</p>
<h3>Q: How can I contact the developer of Design My Home: Makeover Games APK?</h3>
<p>A: You can contact the developer of Design My Home: Makeover Games APK by sending an email to [email protected] or by visiting their website at https://holycow.studio/. You can also follow them on Facebook at https://www.facebook.com/holycowstudio/ or on Instagram at https://www.instagram.com/holycowstudio/.</p>
<h3>Q: How can I delete Design My Home: Makeover Games APK from my device?</h3>
<p>A: You can delete Design My Home: Makeover Games APK from your device by going to Settings > Apps > Design My Home > Uninstall. You can also delete the APK file from your device storage if you don't need it anymore.</p>
<h3>Q: Can I play Design My Home: Makeover Games APK on my PC or laptop?</h3>
<p>A: Yes, you can play Design My Home: Makeover Games APK on your PC or laptop by using an Android emulator like BlueStacks or NoxPlayer. These emulators will allow you to run Android apps on your PC or laptop. However, you may experience some performance issues or compatibility problems depending on your device specifications.</p> 197e85843d<br />
<br />
<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bloons TD 6 on PC and Experience the Most Epic Tower Defense Game.md
DELETED
@@ -1,152 +0,0 @@
<br />
<h1>Bloons TD 6: A Fun and Challenging Tower Defense Game</h1>
<p>If you are a fan of tower defense games, you might have heard of Bloons TD 6, the latest installment in the Bloons Tower Defense series by Ninja Kiwi. Bloons TD 6 is a strategy game that challenges you to pop all the invading balloons (or bloons) before they reach the end of the track. You can use a variety of monkey towers, heroes, and powers to stop them. But be careful, as the bloons come in different shapes, sizes, and abilities, and some of them are very tough to pop.</p>
<h2>bloons td 6 free download pc</h2><br /><p><b><b>Download</b> ✯ <a href="https://urlin.us/2uT1rc">https://urlin.us/2uT1rc</a></b></p><br /><br />
<p>In this article, we will give you an overview of what Bloons TD 6 is, how to download and play it on your PC, and how to master it with some useful tips and tricks. Let's get started!</p>
<h2>What is Bloons TD 6?</h2>
<p>Bloons TD 6 is a strategy based video game developed and published by Ninja Kiwi. It was released on June 13, 2018 for Android and iOS, and later brought to Steam for Windows and Macintosh. It is the sixth main game in the Bloons Tower Defense series, which started in 2007 as a web browser game.</p>
<p>Bloons TD 6 follows the same tower defense formula as its predecessors, but it also introduces some new features and improvements that make it more fun and challenging. Here are some of the main aspects of the game:</p>
<h3>The basic gameplay of Bloons TD 6</h3>
<p>The core gameplay of Bloons TD 6 is simple: you have to prevent the bloons from reaching the end of the track by placing monkey towers along the way. Each monkey tower has a different attack range, rate, damage, and cost. You can also upgrade your towers to make them more powerful or give them special abilities.</p>
<p>There are four classes of monkey towers: Primary, Military, Magic, and Support. Each class has its own strengths and weaknesses against different types of bloons. For example, Primary towers are good at popping regular bloons, but they struggle against lead or camo bloons. Military towers are good at popping camo bloons, but they are weak against purple bloons. Magic towers can pop almost any bloon, but they are expensive and have low pierce. Support towers do not attack directly, but they provide buffs or debuffs to other towers or bloons.</p>
<p>bloons td 6 pc download full version free<br />
how to get bloons td 6 for free on pc<br />
bloons td 6 free download windows 10<br />
bloons td 6 pc game free download<br />
bloons tower defense 6 free download pc<br />
bloons td 6 online free no download pc<br />
bloons td 6 steam free download pc<br />
bloons td 6 apk free download pc<br />
bloons td 6 cracked download pc free<br />
bloons td 6 mod free download pc<br />
bloons td 6 emulator free download pc<br />
bloons td 6 bluestacks free download pc<br />
bloons td 6 play online for free on pc<br />
bloons td 6 strategy game free download pc<br />
bloons td 6 latest version free download pc<br />
bloons td 6 update free download pc<br />
bloons td 6 multiplayer free download pc<br />
bloons td 6 co op free download pc<br />
bloons td 6 sandbox mode free download pc<br />
bloons td 6 cheats free download pc<br />
bloons td 6 hack free download pc<br />
bloons td 6 trainer free download pc<br />
bloons td 6 save file free download pc<br />
bloons td 6 custom maps free download pc<br />
bloons td 6 editor free download pc<br />
bloons td 6 best towers free download pc<br />
bloons td 6 tier list free download pc<br />
bloons td 6 heroes guide free download pc<br />
bloons td 6 tips and tricks free download pc<br />
bloons td 6 walkthrough free download pc<br />
bloons td 6 achievements free download pc<br />
bloons td 6 challenges free download pc<br />
bloons td 6 daily rewards free download pc<br />
bloons td 6 monkey knowledge free download pc<br />
bloons td 6 insta monkeys free download pc<br />
bloons td 6 skins free download pc<br />
bloons td 6 soundtrack free download pc<br />
bloons td 6 wallpaper free download pc<br />
bloons td 6 review free download pc<br />
bloons td 6 reddit free download pc<br />
bloons td 6 discord server free download pc<br />
bloons td 6 wiki free download pc<br />
bloons td 6 official website free download pc<br />
bloons td 6 system requirements free download pc<br />
how to install bloons td 6 for free on pc <br />
how to play bloons td 6 offline for free on pc <br />
how to transfer bloons td 6 data for free on pc <br />
how to unlock all monkeys in bloons td 6 for free on pc <br />
how to get unlimited money in bloons td 6 for free on pc</p>
<p>You can choose from several game modes and difficulty levels to suit your preference and skill level. The game modes include Standard (the normal mode), Impoppable (the hardest mode), CHIMPS (a mode where you cannot use Continues, Hearts Lost, Income, Monkey Knowledge, Powers or Selling), Sandbox (a mode where you can test your strategies with unlimited money and lives), Races (a mode where you compete with other players to finish a map as fast as possible), Co-Op (a mode where you can team up with up to three other players), Odysseys (a mode where you have to complete a series of maps with limited tower choices), Boss Events (a mode where you have to face powerful boss bloons), Contested Territory (a</p>

| Minimum | Recommended |
| --- | --- |
| Processor: 2Ghz or better | |
| Memory: 4096 MB RAM | Memory: 8192 MB RAM |
| Graphics: OpenGL 2.0 compatible, ATI, Nvidia or Intel HD | Graphics: OpenGL 2.0 compatible, ATI, Nvidia or Intel HD |
| Storage: 2048 MB available space | Storage: 4096 MB available space |
| Sound Card: Windows compatible sound card | Sound Card: Windows compatible sound card |

<h3>The steps to download and install Bloons TD 6 on PC</h3>
<p>Depending on your preference, you can choose one of the following methods to download and install Bloons TD 6 on your PC:</p>
<h4>Using BlueStacks emulator</h4>
<p>BlueStacks is a popular Android emulator that allows you to run Android apps and games on your PC. You can use BlueStacks to play Bloons TD 6 on your PC with the same features and performance as on your mobile device. Here are the steps to do this:</p>
<ol>
<li>Download and install BlueStacks from its official website: <a href="">https://www.bluestacks.com/</a></li>
<li>Launch BlueStacks and sign in with your Google account.</li>
<li>Go to the Google Play Store app and search for Bloons TD 6.</li>
<li>Click on the Install button and wait for the download and installation to complete.</li>
<li>Go to the My Apps tab and click on the Bloons TD 6 icon to launch the game.</li>
<li>Enjoy playing Bloons TD 6 on your PC with BlueStacks.</li>
</ol>
<h4>Using Steam or Epic Games store</h4>
<p>Steam and Epic Games store are two of the most popular platforms for PC gaming. You can use either of them to buy and play Bloons TD 6 on your PC with enhanced graphics and controls. Here are the steps to do this:</p>
<ol>
<li>Download and install Steam from its official website: <a href="">https://store.steampowered.com/</a> or Epic Games store from its official website: <a href="">https://www.epicgames.com/store/en-US/</a></li>
<li>Create an account and sign in to Steam or Epic Games store.</li>
<li>Go to the Store page and search for Bloons TD 6.</li>
<li>Click on the Add to Cart button and proceed to checkout.</li>
<li>Pay for the game using your preferred payment method.</li>
<li>Go to the Library page and click on the Bloons TD 6 icon to launch the game.</li>
<li>Enjoy playing Bloons TD 6 on your PC with Steam or Epic Games store.</li>
</ol>
<h2>How to master Bloons TD 6?</h2>
<p>Bloons TD 6 is a fun and challenging game that requires strategy, skill, and creativity. If you want to master the game and beat all the levels, modes, and challenges, you need to learn some tips and tricks that can help you improve your gameplay. Here are some of them:</p>
<h3>The best strategies, tips, and tricks for Bloons TD 6</h3>
<p>Bloons TD 6 is a game that has many variables and possibilities. There is no one best strategy or solution for every situation. However, there are some general principles and guidelines that can help you make better decisions and optimize your performance. Here are some of them:</p>
<h4>Choosing the right monkeys, upgrades, and heroes</h4>
<p>The first step to mastering Bloons TD 6 is choosing the right monkeys, upgrades, and heroes for each map, mode, and difficulty. You need to consider several factors such as the track layout, the bloon types, the available money, the tower restrictions, and the synergy between different towers. You also need to experiment with different combinations and see what works best for you.</p>
<p>Some of the most popular and effective monkeys in Bloons TD 6 are:</p>
<ul>
<li>Ninja Monkey: A versatile tower that can pop camo bloons, throw shurikens at high speed, slow down bloons with caltrops or sabotage supply lines, and deal massive damage with grandmaster ninja or master bomber upgrades.</li>
<li>Dartling Gunner: A powerful tower that can shoot darts in any direction you point your cursor, pierce through multiple bloons with faster barrel spin or hydro rocket pods upgrades, deal extra damage to MOAB-class bloons with mad shredder or rocket storm upgrades, and unleash a devastating ray of doom or plasma accelerator upgrade.</li>
<li>Sun Avatar: A super monkey that can shoot sun beams that pop multiple layers of bloons, create mini sun avatars with sun temple or sun temple upgrades, and become the ultimate bloon destroyer with the true sun god or the legend of the night upgrade.</li>
<li>Alchemist: A support tower that can buff other towers with acid mixture dip or stronger stimulant upgrades, pop lead and fortified bloons with acidic mixture dip or unstable concoction upgrades, generate extra income with rubber to gold or lead to gold upgrades, and transform into a powerful spellcaster with total transformation or permanent brew upgrades.</li>
<li>Banana Farm: An income tower that can produce bananas that give you money when collected, increase your banana production with greater production or valuable bananas upgrades, create banana crates or banks that store more money, and generate more income with monkey-nomics or monkey wall street upgrades.</li>
</ul>
<p>Some of the most popular and effective heroes in Bloons TD 6 are:</p>
<ul>
<li>Quincy: A well-rounded hero that can shoot multiple arrows at once, deal extra damage to MOAB-class bloons with explosive arrows or storm of arrows abilities, and boost the attack speed and pierce of nearby primary monkeys with rapid shot or arrow barrage abilities.</li>
<li>Gwendolin: A fiery hero that can set bloons on fire, deal extra damage to ceramic and fortified bloons with firestorm or cocktail of fire abilities, and boost the damage and range of nearby fire-based monkeys with heat it up or firestorm abilities.</li>
<li>Benjamin: A hacker hero that can generate extra income with skimming or biohack abilities, hack bloons to make them weaker or give more money with trojan or syphon funding abilities, and disable the abilities of nearby monkeys with bloon trojan or biohack abilities.</li>
<li>Adora: A divine hero that can shoot powerful bolts of light, deal massive damage to all bloons on screen with ball of light or blood sacrifice abilities, and sacrifice nearby towers to gain power and level up faster with blood sacrifice or true sun god abilities.</li>
</ul>
<h4>Placing your towers wisely and using abilities effectively</h4>
<p>The second step to mastering Bloons TD 6 is placing your towers wisely and using their abilities effectively. You need to consider several factors such as the track layout, the line of sight, the range, the cost, the synergy, and the timing. You also need to experiment with different placements and see what works best for you.</p>
<p>Some of the general tips for placing your towers are:</p>
<ul>
<li>Place your towers near the start or the end of the track to maximize their attack time and damage output.</li>
<li>Place your towers near curves or intersections to maximize their attack range and pierce.</li>
<li>Place your towers on elevated platforms or water spots to avoid line-of-sight issues and gain access to exclusive towers.</li>
<li>Place your towers in clusters or groups to benefit from buffs or debuffs from other towers.</li>
<li>Place your towers strategically to cover different types of bloons and create choke points.</li>
</ul>
<p>Some of the general tips for using your abilities are:</p>
<ul>
<li>Use your abilities when you are facing a large wave of bloons or a tough boss bloon.</li>
<li>Use your abilities in combination with other abilities or powers to create a powerful effect.</li>
<li>Use your abilities sparingly and wisely, as they have a cooldown time and a limited number of uses per game.</li>
<li>Use your abilities according to the situation and the type of ability. For example, use offensive abilities to deal damage, defensive abilities to protect yourself, support abilities to buff yourself or debuff enemies, and income abilities to generate money.</li>
</ul>
<h4>Gaining experience and unlocking new content</h4>
<p>The third step to mastering Bloons TD 6 is gaining experience and unlocking new content. You need to play the game regularly and complete different levels, modes, and challenges to earn experience points (XP) and monkey money (MM). You can use XP to unlock new towers, upgrades, heroes, maps, modes, and achievements. You can use MM to buy new powers, insta-monkeys, skins, trophies, and more. You can also earn trophies by completing achievements or participating in events. You can use trophies to buy cosmetic items from the trophy store.</p>
<p>Some of the ways to gain more XP and MM are:</p>
<ul>
<li>Play on higher difficulty levels and harder game modes to earn more XP and MM per game.</li>
<li>Play on different maps and use different towers and heroes to earn more XP for each tower and hero class.</li>
<li>Play on co-op mode and team up with other players to earn more XP and MM per game.</li>
<li>Play on races mode and compete with other players to earn more XP and MM per game.</li>
<li>Play on odysseys mode and complete a series of maps with limited tower choices to earn more XP and MM per game.</li>
<li>Play on boss events mode and defeat powerful boss bloons to earn more XP and MM per game.</li>
<li>Play on daily challenges mode and complete maps with specific rules and restrictions to earn more XP and MM per game.</li>
<li>Use monkey knowledge points (MKP) to unlock passive bonuses and perks for your towers, heroes, powers, and income.</li>
</ul>
<h2>Conclusion</h2>
<p>Bloons TD 6 is a fun and challenging tower defense game that offers hours of entertainment and replay value. You can enjoy the game on your mobile device or your PC, and you can customize your game experience with various features and options. You can also improve your skills and strategies by learning from the tips and tricks we shared in this article. We hope you found this article helpful and informative, and we wish you good luck and have fun popping bloons!</p>
<h3>FAQs</h3>
<p>Here are some of the frequently asked questions about Bloons TD 6:</p>
<ul>
<li><b>Q: How much does Bloons TD 6 cost?</b></li>
<li>A: Bloons TD 6 costs $4.99 on Android and iOS, $9.99 on Steam, and $14.99 on Epic Games store. However, the game often goes on sale or offers discounts, so you can check the prices regularly to find the best deal.</li>
<li><b>Q: Is Bloons TD 6 online or offline?</b></li>
<li>A: Bloons TD 6 can be played both online and offline. You can play online to access all the features and content of the game, such as co-op mode, races mode, boss events mode, content browser, trophy store, etc. You can also play offline to enjoy the game without an internet connection, but you will not be able to access some of the features and content of the game.</li>
<li><b>Q: Is Bloons TD 6 cross-platform?</b></li>
<li>A: Bloons TD 6 is cross-platform between Android, iOS, Windows, and Macintosh. You can play with other players or transfer your progress across different devices using the same Ninja Kiwi account. However, Bloons TD 6 is not cross-platform with Epic Games store, so you cannot play with or transfer your progress to or from Epic Games store users.</li>
<li><b>Q: Is Bloons TD 6 free to play?</b></li>
<li>A: Bloons TD 6 is not free to play, as you have to buy the game to play it. However, Bloons TD 6 does not have any in-app purchases or microtransactions that require real money. You can earn all the in-game currency and items by playing the game normally.</li>
<li><b>Q: Is Bloons TD 6 multiplayer?</b></li>
<li>A: Bloons TD 6 is multiplayer, as you can play with up to three other players in co-op mode. You can also compete with other players in races mode or contested territory mode. You can also chat with other players using the in-game chat feature or join a clan to socialize with other players.</li>
</ul></p> 197e85843d<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Baixe o Livro de Regras do RPG Ordem Paranormal criado por Cellbit e seus amigos.md
DELETED
@@ -1,124 +0,0 @@
<br />
<h1>Livro de Regras RPG Cellbit PDF Download: Everything You Need to Know</h1>
<p>Are you a fan of the Ordem Paranormal RPG, created by the youtuber Cellbit? Do you want to know how to download the game's official rulebook in PDF? Then you have come to the right place. In this article, we will tell you everything you need to know about the Cellbit RPG rulebook PDF download, including:</p>
<h2>livro de regras rpg cellbit pdf download</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://jinyurl.com/2uNStv">https://jinyurl.com/2uNStv</a></b></p><br /><br />
<ul>
<li>What the Ordem Paranormal RPG is and how to play it;</li>
<li>How to download the game's official rulebook in PDF;</li>
<li>What the main new features and characteristics of the book are;</li>
<li>Where to watch the campaigns run by Cellbit and his guests;</li>
<li>How to adapt the setting to other RPG systems.</li>
</ul>
<p>Curious? Then keep reading and find out everything about the Cellbit RPG rulebook PDF download.</p>
<h2>What the Ordem Paranormal RPG is and how to play it</h2>
<p>The Ordem Paranormal RPG is a tabletop game created by Rafael Lange, better known as Cellbit, one of the biggest streamers and youtubers in Brazil. The game is set in a universe where reality is threatened by supernatural creatures from the Other Side (Outro Lado), and the players portray agents of the Ordem da Realidade (Order of Reality), a secret organization that fights to protect the world from paranormal dangers.</p>
<p>The game is based on the d20 system, the same one used in Dungeons & Dragons and Pathfinder, but with some adaptations and innovations made by Cellbit and his team. The game uses 20-sided dice (d20) to resolve the characters' actions, and each character has attributes, skills, equipment and rituals that define their capabilities and characteristics.</p>
<p>cellbit rpg rulebook pdf free<br />
how to download the cellbit rpg rulebook<br />
ordem paranormal rpg rulebook pdf<br />
cellbit rpg rulebook online<br />
cellbit rpg rulebook jambo editora<br />
review of the cellbit rpg rulebook<br />
cellbit rpg rulebook hardcover<br />
cellbit rpg rulebook final version<br />
cellbit rpg rulebook character sheet<br />
cellbit rpg rulebook price<br />
buy the cellbit rpg rulebook<br />
cellbit rpg rulebook pre-order<br />
cellbit rpg rulebook release<br />
cellbit rpg rulebook synopsis<br />
cellbit rpg rulebook complete pdf<br />
cellbit rpg rulebook epub<br />
cellbit rpg rulebook mobi<br />
cellbit rpg rulebook amazon<br />
cellbit rpg rulebook mercado livre<br />
cellbit rpg rulebook submarino<br />
cellbit rpg rulebook americanas<br />
cellbit rpg rulebook saraiva<br />
cellbit rpg rulebook cultura<br />
cellbit rpg rulebook travessa<br />
cellbit rpg rulebook martins fontes<br />
cellbit rpg rulebook leitura<br />
cellbit rpg rulebook pdf drive<br />
cellbit rpg rulebook pdf mega<br />
cellbit rpg rulebook pdf mediafire<br />
cellbit rpg rulebook pdf 4shared<br />
cellbit rpg rulebook pdf dropbox<br />
cellbit rpg rulebook pdf archive.org<br />
cellbit rpg rulebook pdf studocu<br />
cellbit rpg rulebook pdf scribd<br />
cellbit rpg rulebook pdf academia.edu<br />
summary of the cellbit rpg rulebook pdf<br />
analysis of the cellbit rpg rulebook pdf<br />
critique of the cellbit rpg rulebook pdf<br />
opinion on the cellbit rpg rulebook pdf<br />
comments on the cellbit rpg rulebook pdf<br />
tips for the cellbit rpg rulebook pdf<br />
guide to the cellbit rpg rulebook pdf<br />
tutorial for the cellbit rpg rulebook pdf<br />
video about the cellbit rpg rulebook pdf<br />
podcast about the cellbit rpg rulebook pdf<br />
blog about the cellbit rpg rulebook pdf<br />
site about the cellbit rpg rulebook pdf<br />
forum about the cellbit rpg rulebook pdf<br />
group about the cellbit rpg rulebook pdf<br />
community about the cellbit rpg rulebook pdf</p>
<p>The game also has a strong focus on narrative and on roleplaying the characters, encouraging players to create engaging and immersive stories. The game is divided into chapters, which are game sessions with a defined objective. Each chapter can be played in one or more sessions, depending on the pace and style of the players and the game master.</p>
<h3>How to download the game's official rulebook in PDF</h3>
<p>The official rulebook of the Ordem Paranormal RPG was produced by Cellbit in partnership with the publisher Jambô and is available for purchase on the publisher's website. The book has more than 300 pages and contains all the rules for creating characters, running adventures, using rituals and facing the enemies from the Other Side.</p>
<p>In addition, the book brings information about the game's setting, including the history of the Ordem da Realidade, the main organizations and factions of the world, the types of supernatural creatures that exist on the Other Side and the regions where adventures can take place.</p>
<p>To get the Cellbit RPG rulebook PDF download, you need to buy the physical book on the Jambô website. When you do, you receive a link to download the book's PDF file. You can read the book on your computer, tablet or phone, or print a copy to use in your game sessions.</p>
<h4>What the main new features and characteristics of the book are</h4>
<p>The Cellbit RPG rulebook brings several new features and characteristics that make the game unique and fun. Some of them are:</p>
<ul>
<li>The flashback system: a mechanic that lets players go back in time to show scenes that happened before or during the adventure, revealing important aspects of the characters and the plot;</li>
<li>The ritual system: a mechanic that lets characters use the power of the Other Side to perform extraordinary feats, such as summoning entities, manipulating reality, healing wounds and much more. Rituals are based on symbols, ingredients and words of power, and require a skill test to succeed;</li>
<li>The sanity system: a mechanic that represents the psychological impact that paranormal situations have on the characters. Characters can lose sanity points when witnessing scenes of horror, violence or mystery, and can suffer consequences such as fears, phobias, hallucinations and even insanity;</li>
<li>The equipment system: a mechanic that lets characters use many types of items to help in their missions, such as weapons, vehicles, technological devices, magic items and much more. Equipment has characteristics such as cost, weight, damage, range and special effects;</li>
<li>The character creation system: a mechanic that lets players create their own agents of the Ordem da Realidade, choosing among six archetypes (Marksman, Fighter, Detective, Hacker, Medic and Occultist), six origins (American, Brazilian, Chinese, European, Indian and Russian), six specializations (Melee Weapons, Firearms, Paranormal Knowledge, Hacking, Medicine and Rituals) and six traits (Ambitious, Brave, Curious, Loyal, Protective and Sarcastic). Characters also have a name, an appearance, a background and a personal motivation.</li>
</ul>
<h2>Where to watch the campaigns run by Cellbit and his guests</h2>
<p>If you want inspiration for playing the Ordem Paranormal RPG, or simply want to have fun watching the adventures of Cellbit and his guests, you can follow the campaigns he streams on his YouTube channel. So far, he has run three complete campaigns:</p>
<ul>
<li>The first campaign was called O Segredo na Floresta and had 10 chapters. It featured the youtubers T3ddy, Saiko, MeiaUm, Pk Regular Game and Gab Araújo. The story revolved around a group of friends who went camping in a mysterious forest and ended up caught in a supernatural plot full of suspense and terror;</li>
<li>The second campaign was called Vampiro a Máscara and had 12 chapters. It featured the youtubers Calango, Felps, Luba, Maethe and Rodrigo Coelho. The story was based on the setting of Vampire: The Masquerade, a classic RPG of personal horror, and followed the journey of a group of newly created vampires trying to survive in a city dominated by intrigue, conspiracies and conflicts between the vampire sects;</li>
<li>The third campaign was called Alice and had 13 chapters. It featured the youtubers Alan, Edu, Guaxinim, Jean L and Mariana. The story was inspired by the book Alice in Wonderland, by Lewis Carroll, and followed the adventures of a girl transported to a fantastic and bizarre world, where she had to face many challenges and dangers to find her way back.</li>
</ul>
<p>You can watch all of these campaigns on Cellbit's YouTube channel, in the playlist called RPG Ordem Paranormal. You can also check out the fan art, memes, comments and theories on social media, using the hashtags #OrdemParanormal, #OSegredoNaFloresta, #VampiroAMascara and #Alice.</p>
<h3>How to adapt the setting to other RPG systems</h3>
<p>If you liked the setting of the Ordem Paranormal RPG but prefer to use another RPG system to play, don't worry. You can adapt the setting to whatever system you want, using a few simple tips:</p>
<ul>
<li>Choose a system that is compatible with the genre and tone of the setting. For example, if you want to run a horror and suspense adventure, you can use systems such as Call of Cthulhu, World of Darkness or GURPS Horror. If you want to run an action and adventure game, you can use systems such as Savage Worlds, Fate or GURPS Action;</li>
<li>Use the rules of the chosen system to create the characters, enemies, equipment and rituals. You can use the information in the rulebook as a reference, but you don't have to follow everything to the letter. You can modify or simplify the rules according to your preference and the needs of your story;</li>
<li>Use your creativity to create your own adventures or adapt existing ones. You can use Cellbit's campaigns as inspiration, but you don't need to copy them exactly. You can change the characters, locations, events and outcomes to suit your taste and that of your players.</li>
</ul>
<p>The important thing is to have fun and enjoy the setting of the Ordem Paranormal RPG as much as possible.</p>
<h2>Conclusion</h2>
<p>In this article, you learned everything you need to know about the Cellbit RPG rulebook PDF download. You saw what the Ordem Paranormal RPG is and how to play it, how to download the game's official rulebook in PDF, what the main new features and characteristics of the book are, where to watch the campaigns run by Cellbit and his guests, and how to adapt the setting to other RPG systems.</p>
<p>Now you are ready to become an agent of the Ordem da Realidade and live incredible adventures in the universe of the Ordem Paranormal RPG. We hope you enjoyed this article and that it was useful to you. If you have any questions or suggestions, leave a comment below. And if you want to read more articles about RPGs and other interesting subjects, keep following our blog.</p>
<h2>FAQs</h2>
<h3>What is the Other Side?</h3>
<p>The Other Side is the parallel dimension inhabited by the supernatural creatures that threaten reality. The Other Side is a chaotic, dark and dangerous place, where the laws of physics and logic do not apply. The Other Side can be reached through portals, rituals or anomalous events, but it is a hostile and deadly place for humans.</p>
<h3>Who is Cellbit?</h3>
<p>Cellbit is the creator of the Ordem Paranormal RPG and one of the biggest streamers and youtubers in Brazil. He started making gaming videos in 2012 and has since gained millions of fans and followers. He is known for his humor, his creativity and his passion for RPGs. He is also the founder of Cellbit Produções, an entertainment company that produces content for several platforms.</p>
<h3>What is Jambô?</h3>
<p>Jambô is Cellbit's partner publisher in the production of the rulebook. Jambô is one of the biggest RPG publishers in Brazil, responsible for publishing titles such as Tormenta, 3D&T, Dragon Age, Mutantes & Malfeitores, Reinos de Ferro and many others. Jambô also publishes fiction books, comics and magazines specializing in RPGs and nerd culture.</p>
|
106 |
-
<h3>Como jogar RPG online?</h3>
|
107 |
-
<p>Se você quer jogar RPG online com seus amigos, existem várias ferramentas que podem te ajudar. Algumas delas são:</p>
|
108 |
-
<ul>
|
109 |
-
<li>Rolz: um site que permite criar salas de chat com dados virtuais, ideal para jogar RPGs simples e rápidos;</li>
|
110 |
-
<li>Discord: um aplicativo de comunicação por voz e texto que permite criar servidores com canais dedicados para jogar RPGs, além de integrar bots e plugins que facilitam o jogo;</li>
|
111 |
-
<li>Roll20: uma plataforma online que permite criar mesas de jogo virtuais com mapas, fichas, dados, músicas e muito mais, simulando uma experiência completa de RPG;</li>
|
112 |
-
<li>RPG2ic: um aplicativo brasileiro que permite jogar RPGs pelo celular, com chat, dados, fichas e recursos para criar e compartilhar aventuras.</li>
|
113 |
-
</ul>
|
114 |
-
<h3>Como aprender mais sobre RPGs?</h3>
|
115 |
-
<p>Se você quer aprender mais sobre RPGs, existem várias fontes de informação e entretenimento que podem te ajudar. Algumas delas são:</p>
|
116 |
-
<ul>
|
117 |
-
<li>Livros: existem vários livros que ensinam as regras, os conceitos e as dicas para jogar RPGs, como o Livro dos Jogadores, o Livro do Mestre e o Livro dos Monstros de Dungeons & Dragons, o Manual Básico de 3D&T Alpha, o Guia do Jogador de Tormenta20 e o Guia da Trilogia de Vampiro: A Máscara;</li>
|
118 |
-
<li>Vídeos: existem vários canais no YouTube que falam sobre RPGs, como o Nerdcast RPG, o Formação Fireball, o Covil do RPG, o Perdidos no Play e o próprio canal do Cellbit;</li>
|
119 |
-
<li>Podcasts: existem vários podcasts que falam sobre RPGs, como o Podcast dos Reinos, o Rolando 20, o Papo Furado na Taverna, o Taverna do Beholder Cego e o Podcast da Ordem Paranormal;</li>
|
120 |
-
<li>Blogs: existem vários blogs que falam sobre RPGs, como o Joga o Dado, o Mundos Colidem, o Pensotopia, o RPGista e o Paragons;</li>
|
121 |
-
<li>Revistas: existem várias revistas que falam sobre RPGs, como a Dragão Brasil, a Dungeon Magazine, a Dragon Magazine e a Roleplaying Tips.</li>
|
122 |
-
</ul></p> 401be4b1e0<br />
|
123 |
-
<br />
|
124 |
-
<br />
spaces/2023Liu2023/bingo/src/app/loading.css
DELETED
@@ -1,68 +0,0 @@
::-webkit-scrollbar {
  width: 10px;
  height: 10px;
  display: none;
}

::-webkit-scrollbar-button:start:decrement,
::-webkit-scrollbar-button:end:increment {
  height: 30px;
  background-color: transparent;
}

::-webkit-scrollbar-track-piece {
  background-color: #3b3b3b;
  -webkit-border-radius: 16px;
}

::-webkit-scrollbar-thumb:vertical {
  height: 50px;
  background-color: #666;
  border: 1px solid #eee;
  -webkit-border-radius: 6px;
}

/* loading start */
.loading-spinner {
  display: flex;
  justify-content: center;
  align-items: center;
  height: 100vh;
  opacity: 1;
  transition: opacity .8s ease-out;
}

.loading-spinner.hidden {
  opacity: 0;
}

.loading-spinner>div {
  width: 30px;
  height: 30px;
  background: linear-gradient(90deg, #2870EA 10.79%, #1B4AEF 87.08%);

  border-radius: 100%;
  display: inline-block;
  animation: sk-bouncedelay 1.4s infinite ease-in-out both;
}

.loading-spinner .bounce1 {
  animation-delay: -0.32s;
}

.loading-spinner .bounce2 {
  animation-delay: -0.16s;
}

@keyframes sk-bouncedelay {

  0%,
  80%,
  100% {
    transform: scale(0);
  }

  40% {
    transform: scale(1.0);
  }
}
spaces/AB-TW/team-ai/agents/tools/smart_domain/common.py
DELETED
@@ -1,18 +0,0 @@
tech_prefix = """You are a software developer. {task}

===TechStack
{tech_stack}
===END OF TechStack

===Architecture
{architecture}
===END OF Architecture

===TestStrategy
{test_strategy}
===END OF TestStrategy

"""

def getPrefix(task, tech_stack, architecture, test_strategy):
    return tech_prefix.format(task=task, tech_stack=tech_stack, architecture=architecture, test_strategy=test_strategy)
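For reference, `getPrefix` simply interpolates the four sections into `tech_prefix`. A minimal usage sketch of the deleted helper; the argument strings below are made-up placeholders, not values taken from this repository:

```python
# Hypothetical example values; the real task/tech-stack text lives elsewhere in the repo.
prompt = getPrefix(
    task="Generate the persistence layer for the Feature aggregate.",
    tech_stack="Java 17, Spring Boot, JPA",
    architecture="Hexagonal architecture with api/domain/persistence packages",
    test_strategy="Unit tests with JUnit 5 and an in-memory H2 database",
)
print(prompt)  # the assembled system prefix handed to the LLM chain
```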
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/audio/stft.py
DELETED
@@ -1,180 +0,0 @@
import torch
import torch.nn.functional as F
import numpy as np
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from librosa.filters import mel as librosa_mel_fn

from audioldm.audio.audio_processing import (
    dynamic_range_compression,
    dynamic_range_decompression,
    window_sumsquare,
)


class STFT(torch.nn.Module):
    """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""

    def __init__(self, filter_length, hop_length, win_length, window="hann"):
        super(STFT, self).__init__()
        self.filter_length = filter_length
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.forward_transform = None
        scale = self.filter_length / self.hop_length
        fourier_basis = np.fft.fft(np.eye(self.filter_length))

        cutoff = int((self.filter_length / 2 + 1))
        fourier_basis = np.vstack(
            [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
        )

        forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
        inverse_basis = torch.FloatTensor(
            np.linalg.pinv(scale * fourier_basis).T[:, None, :]
        )

        if window is not None:
            assert filter_length >= win_length
            # get window and zero center pad it to filter_length
            fft_window = get_window(window, win_length, fftbins=True)
            fft_window = pad_center(fft_window, filter_length)
            fft_window = torch.from_numpy(fft_window).float()

            # window the bases
            forward_basis *= fft_window
            inverse_basis *= fft_window

        self.register_buffer("forward_basis", forward_basis.float())
        self.register_buffer("inverse_basis", inverse_basis.float())

    def transform(self, input_data):
        num_batches = input_data.size(0)
        num_samples = input_data.size(1)

        self.num_samples = num_samples

        # similar to librosa, reflect-pad the input
        input_data = input_data.view(num_batches, 1, num_samples)
        input_data = F.pad(
            input_data.unsqueeze(1),
            (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
            mode="reflect",
        )
        input_data = input_data.squeeze(1)

        forward_transform = F.conv1d(
            input_data,
            torch.autograd.Variable(self.forward_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0,
        ).cpu()

        cutoff = int((self.filter_length / 2) + 1)
        real_part = forward_transform[:, :cutoff, :]
        imag_part = forward_transform[:, cutoff:, :]

        magnitude = torch.sqrt(real_part**2 + imag_part**2)
        phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data))

        return magnitude, phase

    def inverse(self, magnitude, phase):
        recombine_magnitude_phase = torch.cat(
            [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
        )

        inverse_transform = F.conv_transpose1d(
            recombine_magnitude_phase,
            torch.autograd.Variable(self.inverse_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0,
        )

        if self.window is not None:
            window_sum = window_sumsquare(
                self.window,
                magnitude.size(-1),
                hop_length=self.hop_length,
                win_length=self.win_length,
                n_fft=self.filter_length,
                dtype=np.float32,
            )
            # remove modulation effects
            approx_nonzero_indices = torch.from_numpy(
                np.where(window_sum > tiny(window_sum))[0]
            )
            window_sum = torch.autograd.Variable(
                torch.from_numpy(window_sum), requires_grad=False
            )
            window_sum = window_sum
            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[
                approx_nonzero_indices
            ]

            # scale by hop ratio
            inverse_transform *= float(self.filter_length) / self.hop_length

        inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :]
        inverse_transform = inverse_transform[:, :, : -int(self.filter_length / 2) :]

        return inverse_transform

    def forward(self, input_data):
        self.magnitude, self.phase = self.transform(input_data)
        reconstruction = self.inverse(self.magnitude, self.phase)
        return reconstruction


class TacotronSTFT(torch.nn.Module):
    def __init__(
        self,
        filter_length,
        hop_length,
        win_length,
        n_mel_channels,
        sampling_rate,
        mel_fmin,
        mel_fmax,
    ):
        super(TacotronSTFT, self).__init__()
        self.n_mel_channels = n_mel_channels
        self.sampling_rate = sampling_rate
        self.stft_fn = STFT(filter_length, hop_length, win_length)
        mel_basis = librosa_mel_fn(
            sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax
        )
        mel_basis = torch.from_numpy(mel_basis).float()
        self.register_buffer("mel_basis", mel_basis)

    def spectral_normalize(self, magnitudes, normalize_fun):
        output = dynamic_range_compression(magnitudes, normalize_fun)
        return output

    def spectral_de_normalize(self, magnitudes):
        output = dynamic_range_decompression(magnitudes)
        return output

    def mel_spectrogram(self, y, normalize_fun=torch.log):
        """Computes mel-spectrograms from a batch of waves
        PARAMS
        ------
        y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]

        RETURNS
        -------
        mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
        """
        assert torch.min(y.data) >= -1, torch.min(y.data)
        assert torch.max(y.data) <= 1, torch.max(y.data)

        magnitudes, phases = self.stft_fn.transform(y)
        magnitudes = magnitudes.data
        mel_output = torch.matmul(self.mel_basis, magnitudes)
        mel_output = self.spectral_normalize(mel_output, normalize_fun)
        energy = torch.norm(magnitudes, dim=1)

        log_magnitudes = self.spectral_normalize(magnitudes, normalize_fun)

        return mel_output, log_magnitudes, energy
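A small usage sketch for the deleted module. The STFT/mel parameter values below are illustrative, not ones read from this Space's configuration, and because `librosa_mel_fn` is called positionally the module assumes an older librosa (pre-0.10) API:

```python
import torch

# Illustrative settings; the Space's actual audio config may differ.
stft = TacotronSTFT(
    filter_length=1024,
    hop_length=160,
    win_length=1024,
    n_mel_channels=64,
    sampling_rate=16000,
    mel_fmin=0,
    mel_fmax=8000,
)

# One second of dummy audio in [-1, 1], shape (B, T) as mel_spectrogram requires.
waveform = torch.rand(1, 16000) * 2.0 - 1.0
mel, log_magnitudes, energy = stft.mel_spectrogram(waveform)
print(mel.shape)  # (1, n_mel_channels, num_frames)
```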
spaces/AP123/text-to-3D/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Point-e Demo
emoji: 🐢
colorFrom: yellow
colorTo: blue
sdk: gradio
sdk_version: 3.14.0
app_file: app.py
pinned: false
duplicated_from: anzorq/point-e_demo
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspective/Perspective.js
DELETED
@@ -1,2 +0,0 @@
import { ContainerPerspective } from '../../../plugins/perspectiveimage.js';
export default ContainerPerspective;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/ScrollBar.d.ts
DELETED
@@ -1,67 +0,0 @@
// import * as Phaser from 'phaser';
import Sizer from '../sizer/Sizer';
import RoundRecrangle from '../../../plugins/roundrectangle';

export default ScrollBar;

declare namespace ScrollBar {

    type SliderInputTypes = 0 | 1 | -1 | 'drag' | 'pan' | 'click' | 'none';

    interface IConfig extends Sizer.IConfig {
        space?: {
            left?: number,
            right?: number,
            top?: number,
            bottom?: number,
        },

        background?: Phaser.GameObjects.GameObject,

        buttons?: {
            top?: Phaser.GameObjects.GameObject,
            bottom?: Phaser.GameObjects.GameObject,
            left?: Phaser.GameObjects.GameObject,
            right?: Phaser.GameObjects.GameObject,

            step?: number,
        },

        slider?: {
            background?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig,
            track?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig,
            indicator?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig,
            thumb?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig,
            input?: SliderInputTypes,
            gap?: number,
            easeValue?: {
                duration?: number,
                ease?: string
            },
        }

        valuechangeCallback?: (newValue: number, oldValue: number, ScrollBar: ScrollBar) => void,

        enable?: boolean,
    }
}

declare class ScrollBar extends Sizer {
    constructor(
        scene: Phaser.Scene,
        config?: ScrollBar.IConfig
    );

    value: number;
    getValue(min?: number, max?: number): number;
    setValue(value?: number, min?: number, max?: number): this;
    addValue(inc?: number, min?: number, max?: number): this;

    easeValueTo(value?: number, min?: number, max?: number): this;
    stopEaseValue(): this;
    setEaseValueDuration(duration: number): this;
    setEaseValueFunction(ease: string): this;

    setEnable(enable?: boolean): this;
    enable: boolean;
}
spaces/Algoworks/Image_Face_Upscale_Restoration-GFPGAN_pub/app.py
DELETED
@@ -1,142 +0,0 @@
import os

import cv2
import gradio as gr
import torch
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from gfpgan.utils import GFPGANer
from realesrgan.utils import RealESRGANer

os.system("pip freeze")
# download weights
if not os.path.exists('realesr-general-x4v3.pth'):
    os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P .")
if not os.path.exists('GFPGANv1.2.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth -P .")
if not os.path.exists('GFPGANv1.3.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P .")
if not os.path.exists('GFPGANv1.4.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .")
if not os.path.exists('RestoreFormer.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth -P .")
if not os.path.exists('CodeFormer.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/CodeFormer.pth -P .")

torch.hub.download_url_to_file(
    'https://thumbs.dreamstime.com/b/tower-bridge-traditional-red-bus-black-white-colors-view-to-tower-bridge-london-black-white-colors-108478942.jpg',
    'a1.jpg')
torch.hub.download_url_to_file(
    'https://media.istockphoto.com/id/523514029/photo/london-skyline-b-w.jpg?s=612x612&w=0&k=20&c=kJS1BAtfqYeUDaORupj0sBPc1hpzJhBUUqEFfRnHzZ0=',
    'a2.jpg')
torch.hub.download_url_to_file(
    'https://i.guim.co.uk/img/media/06f614065ed82ca0e917b149a32493c791619854/0_0_3648_2789/master/3648.jpg?width=700&quality=85&auto=format&fit=max&s=05764b507c18a38590090d987c8b6202',
    'a3.jpg')
torch.hub.download_url_to_file(
    'https://i.pinimg.com/736x/46/96/9e/46969eb94aec2437323464804d27706d--victorian-london-victorian-era.jpg',
    'a4.jpg')

# background enhancer with RealESRGAN
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
model_path = 'realesr-general-x4v3.pth'
half = True if torch.cuda.is_available() else False
upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half)

os.makedirs('output', exist_ok=True)


# def inference(img, version, scale, weight):
def inference(img, version, scale):
    # weight /= 100
    print(img, version, scale)
    try:
        extension = os.path.splitext(os.path.basename(str(img)))[1]
        img = cv2.imread(img, cv2.IMREAD_UNCHANGED)
        if len(img.shape) == 3 and img.shape[2] == 4:
            img_mode = 'RGBA'
        elif len(img.shape) == 2:  # for gray inputs
            img_mode = None
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        else:
            img_mode = None

        h, w = img.shape[0:2]
        if h < 300:
            img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)

        if version == 'v1.2':
            face_enhancer = GFPGANer(
                model_path='GFPGANv1.2.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
        elif version == 'v1.3':
            face_enhancer = GFPGANer(
                model_path='GFPGANv1.3.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
        elif version == 'v1.4':
            face_enhancer = GFPGANer(
                model_path='GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
        elif version == 'RestoreFormer':
            face_enhancer = GFPGANer(
                model_path='RestoreFormer.pth', upscale=2, arch='RestoreFormer', channel_multiplier=2, bg_upsampler=upsampler)
        elif version == 'CodeFormer':
            face_enhancer = GFPGANer(
                model_path='CodeFormer.pth', upscale=2, arch='CodeFormer', channel_multiplier=2, bg_upsampler=upsampler)
        elif version == 'RealESR-General-x4v3':
            face_enhancer = GFPGANer(
                model_path='realesr-general-x4v3.pth', upscale=2, arch='realesr-general', channel_multiplier=2, bg_upsampler=upsampler)

        try:
            # _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True, weight=weight)
            _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
        except RuntimeError as error:
            print('Error', error)

        try:
            if scale != 2:
                interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4
                h, w = img.shape[0:2]
                output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation)
        except Exception as error:
            print('wrong scale input.', error)
        if img_mode == 'RGBA':  # RGBA images should be saved in png format
            extension = 'png'
        else:
            extension = 'jpg'
        save_path = f'output/out.{extension}'
        cv2.imwrite(save_path, output)

        output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
        return output, save_path
    except Exception as error:
        print('global exception', error)
        return None, None


title = "Image Upscaling & Restoration(esp. Face) using GFPGAN Algorithm"
description = r"""Gradio demo for <a href='https://github.com/TencentARC/GFPGAN' target='_blank'><b>GFPGAN: Towards Real-World Blind Face Restoration and Upscalling of the image with a Generative Facial Prior</b></a>.<br>
Practically the algorithm is used to restore your **old photos** or improve **AI-generated faces**.<br>
To use it, simply just upload the concerned image.<br>
"""
article = r"""
[](https://github.com/TencentARC/GFPGAN/releases)
[](https://github.com/TencentARC/GFPGAN)
[](https://arxiv.org/abs/2101.04061)
<center><img src='https://visitor-badge.glitch.me/badge?page_id=dj_face_restoration_GFPGAN' alt='visitor badge'></center>
"""
demo = gr.Interface(
    inference, [
        gr.inputs.Image(type="filepath", label="Input"),
        # gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer', 'CodeFormer'], type="value", default='v1.4', label='version'),
        gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer','CodeFormer','RealESR-General-x4v3'], type="value", default='v1.4', label='version'),
        gr.inputs.Number(label="Rescaling factor", default=2),
        # gr.Slider(0, 100, label='Weight, only for CodeFormer. 0 for better quality, 100 for better identity', default=50)
    ], [
        gr.outputs.Image(type="numpy", label="Output (The whole image)"),
        gr.outputs.File(label="Download the output image")
    ],
    title=title,
    description=description,
    article=article,
    # examples=[['AI-generate.jpg', 'v1.4', 2, 50], ['lincoln.jpg', 'v1.4', 2, 50], ['Blake_Lively.jpg', 'v1.4', 2, 50],
    #           ['10045.png', 'v1.4', 2, 50]]).launch()
    examples=[['a1.jpg', 'v1.4', 2], ['a2.jpg', 'v1.4', 2], ['a3.jpg', 'v1.4', 2],['a4.jpg', 'v1.4', 2]])

demo.queue(concurrency_count=4)
demo.launch()
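Outside of the Gradio UI, the same `inference` function can be called directly. A rough sketch, assuming the module-level code above has already fetched the model weights and the sample images:

```python
# 'a1.jpg' is one of the sample images downloaded at import time.
restored, save_path = inference('a1.jpg', version='v1.4', scale=2)
if restored is not None:
    print('restored image saved to', save_path)  # e.g. output/out.jpg
else:
    print('inference failed; see the exception printed above')
```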
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/批量总结PDF文档pdfminer.py
DELETED
@@ -1,160 +0,0 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

fast_debug = False

def readPdf(pdfPath):
    """
    读取pdf文件,返回文本内容
    """
    import pdfminer
    from pdfminer.pdfparser import PDFParser
    from pdfminer.pdfdocument import PDFDocument
    from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
    from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
    from pdfminer.pdfdevice import PDFDevice
    from pdfminer.layout import LAParams
    from pdfminer.converter import PDFPageAggregator

    fp = open(pdfPath, 'rb')

    # Create a PDF parser object associated with the file object
    parser = PDFParser(fp)

    # Create a PDF document object that stores the document structure.
    # Password for initialization as 2nd parameter
    document = PDFDocument(parser)
    # Check if the document allows text extraction. If not, abort.
    if not document.is_extractable:
        raise PDFTextExtractionNotAllowed

    # Create a PDF resource manager object that stores shared resources.
    rsrcmgr = PDFResourceManager()

    # Create a PDF device object.
    # device = PDFDevice(rsrcmgr)

    # BEGIN LAYOUT ANALYSIS.
    # Set parameters for analysis.
    laparams = LAParams(
        char_margin=10.0,
        line_margin=0.2,
        boxes_flow=0.2,
        all_texts=False,
    )
    # Create a PDF page aggregator object.
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    # Create a PDF interpreter object.
    interpreter = PDFPageInterpreter(rsrcmgr, device)

    # loop over all pages in the document
    outTextList = []
    for page in PDFPage.create_pages(document):
        # read the page into a layout object
        interpreter.process_page(page)
        layout = device.get_result()
        for obj in layout._objs:
            if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
                # print(obj.get_text())
                outTextList.append(obj.get_text())

    return outTextList


def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, glob, os
    from bs4 import BeautifulSoup
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        if ".tex" in fp:
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
        if ".pdf" in fp.lower():
            file_content = readPdf(fp)
            file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')

        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            )  # 带超时倒计时
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
            if not fast_debug: time.sleep(2)

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say,
            inputs_show_user=i_say,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history=history,
            sys_prompt="总结文章。"
        )  # 带超时倒计时
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面


@CatchException
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # 清空历史,以免输入溢出
    import glob, os

    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        import pdfminer, bs4
    except:
        report_execption(chatbot, history,
            a = f"解析项目: {txt}",
            b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
                    # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
                    # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
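The `readPdf` helper above is independent of the chatbot plumbing and can be tried on its own. A minimal sketch; the file name is a placeholder:

```python
# Placeholder path; any text-extractable PDF works.
boxes = readPdf("paper.pdf")
print(f"{len(boxes)} horizontal text boxes extracted")
print(boxes[0][:200])  # first 200 characters of the first text box
```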
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/callback/__init__.py
DELETED
@@ -1,16 +0,0 @@
from pytorch_lightning.callbacks import (
    ModelCheckpoint,
    LearningRateMonitor,
    EarlyStopping,
)
from src.utils.registry import Registry

# from src.callback.visualizer_callbacks import VisualizerCallback

CALLBACK_REGISTRY = Registry("CALLBACK")

CALLBACK_REGISTRY.register(EarlyStopping)
CALLBACK_REGISTRY.register(ModelCheckpoint)
CALLBACK_REGISTRY.register(LearningRateMonitor)
# TODO: add WandB visualizer callback
# CALLBACK_REGISTRY.register(VisualizerCallback)
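The registry pattern above makes callbacks selectable by name from a config. A sketch of how a project-specific callback might be added, assuming `Registry.register` accepts user-defined classes the same way it accepts the Lightning built-ins; the callback itself is hypothetical:

```python
from pytorch_lightning.callbacks import Callback

class LogSampleImages(Callback):
    """Hypothetical project callback; shown only to illustrate the registry usage."""
    def on_validation_epoch_end(self, trainer, pl_module):
        print(f"epoch {trainer.current_epoch}: log style-transfer previews here")

# Register it the same way the built-in Lightning callbacks are registered above.
CALLBACK_REGISTRY.register(LogSampleImages)
```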
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README_sdxl.md
DELETED
@@ -1,189 +0,0 @@
# DreamBooth training example for Stable Diffusion XL (SDXL)

[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like stable diffusion given just a few (3~5) images of a subject.

The `train_dreambooth_lora_sdxl.py` script shows how to implement the training procedure and adapt it for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952).

> 💡 **Note**: For now, we only allow DreamBooth fine-tuning of the SDXL UNet via LoRA. LoRA is a parameter-efficient fine-tuning technique introduced in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd in the `examples/dreambooth` folder and run
```bash
pip install -r requirements_sdxl.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell (e.g., a notebook)

```python
from accelerate.utils import write_basic_config
write_basic_config()
```

When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups.

### Dog toy example

Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.

Let's first download it locally:

```python
from huggingface_hub import snapshot_download

local_dir = "./dog"
snapshot_download(
    "diffusers/dog-example",
    local_dir=local_dir, repo_type="dataset",
    ignore_patterns=".gitattributes",
)
```

Since SDXL 0.9 weights are gated, we need to be authenticated to be able to use them. So, let's run:

```bash
huggingface-cli login
```

This will also allow us to push the trained LoRA parameters to the Hugging Face Hub platform.

Now, we can launch training using:

```bash
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="lora-trained-xl"

accelerate launch train_dreambooth_lora_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --mixed_precision="fp16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=1024 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --learning_rate=1e-4 \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=25 \
  --seed="0" \
  --push_to_hub
```

To better track our training experiments, we're using the following flags in the command above:

* `report_to="wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
* `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.

Our experiments were conducted on a single 40GB A100 GPU.

### Inference

Once training is done, we can perform inference like so:

```python
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline
import torch

lora_model_id = <"lora-sdxl-dreambooth-id">
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.load_lora_weights(lora_model_id)
image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
image.save("sks_dog.png")
```

We can further refine the outputs with the [Refiner](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0):

```python
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline, StableDiffusionXLImg2ImgPipeline
import torch

lora_model_id = <"lora-sdxl-dreambooth-id">
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

# Load the base pipeline and load the LoRA parameters into it.
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.load_lora_weights(lora_model_id)

# Load the refiner.
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
)
refiner.to("cuda")

prompt = "A picture of a sks dog in a bucket"
generator = torch.Generator("cuda").manual_seed(0)

# Run inference.
image = pipe(prompt=prompt, output_type="latent", generator=generator).images[0]
image = refiner(prompt=prompt, image=image[None, :], generator=generator).images[0]
image.save("refined_sks_dog.png")
```

Here's a side-by-side comparison of the with and without Refiner pipeline outputs:

| Without Refiner | With Refiner |
|---|---|
|  |  |

### Training with text encoder(s)

Alongside the UNet, LoRA fine-tuning of the text encoders is also supported. To do so, just specify `--train_text_encoder` while launching training. Please keep the following points in mind:

* SDXL has two text encoders. So, we fine-tune both using LoRA.
* When not fine-tuning the text encoders, we ALWAYS precompute the text embeddings to save memory.

### Specifying a better VAE

SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument namely `--pretrained_vae_model_name_or_path` that lets you specify the location of a better VAE (such as [this one](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).

## Notes

In our experiments, we found that SDXL yields good initial results without extensive hyperparameter tuning. For example, without fine-tuning the text encoders and without using prior-preservation, we observed decent results. We didn't explore further hyper-parameter tuning experiments, but we do encourage the community to explore this avenue further and share their results with us 🤗

## Results

You can explore the results from a couple of our internal experiments by checking out this link: [https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl](https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl). Specifically, we used the same script with the exact same hyperparameters on the following datasets:

* [Dogs](https://huggingface.co/datasets/diffusers/dog-example)
* [Starbucks logo](https://huggingface.co/datasets/diffusers/starbucks-example)
* [Mr. Potato Head](https://huggingface.co/datasets/diffusers/potato-head-example)
* [Keramer face](https://huggingface.co/datasets/diffusers/keramer-face-example)
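As a small extension of the inference snippet in the README above, a common pattern is to fix one seed per image when comparing checkpoints. A sketch that only reuses the `pipe` object loaded in that snippet; the extra prompt is illustrative, not from this repository:

```python
import torch

prompts = ["A photo of sks dog in a bucket", "A photo of sks dog wearing a beret"]
for i, prompt in enumerate(prompts):
    generator = torch.Generator("cuda").manual_seed(i)  # one fixed seed per prompt
    image = pipe(prompt, num_inference_steps=25, generator=generator).images[0]
    image.save(f"sks_dog_{i}.png")
```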
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py
DELETED
@@ -1,1196 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
from typing import Optional, Union
|
3 |
-
|
4 |
-
import torch
|
5 |
-
from torch import nn
|
6 |
-
|
7 |
-
from ...configuration_utils import ConfigMixin, register_to_config
|
8 |
-
from ...models import ModelMixin
|
9 |
-
from ...models.attention import AdaLayerNorm, FeedForward
|
10 |
-
from ...models.attention_processor import Attention
|
11 |
-
from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed
|
12 |
-
from ...models.transformer_2d import Transformer2DModelOutput
|
13 |
-
from ...utils import logging
|
14 |
-
|
15 |
-
|
16 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
17 |
-
|
18 |
-
|
19 |
-
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
|
20 |
-
# Cut & paste from PyTorch official master until it's in a few official releases - RW
|
21 |
-
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
|
22 |
-
def norm_cdf(x):
|
23 |
-
# Computes standard normal cumulative distribution function
|
24 |
-
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
|
25 |
-
|
26 |
-
if (mean < a - 2 * std) or (mean > b + 2 * std):
|
27 |
-
logger.warning(
|
28 |
-
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
|
29 |
-
"The distribution of values may be incorrect."
|
30 |
-
)
|
31 |
-
|
32 |
-
with torch.no_grad():
|
33 |
-
# Values are generated by using a truncated uniform distribution and
|
34 |
-
# then using the inverse CDF for the normal distribution.
|
35 |
-
# Get upper and lower cdf values
|
36 |
-
l = norm_cdf((a - mean) / std)
|
37 |
-
u = norm_cdf((b - mean) / std)
|
38 |
-
|
39 |
-
# Uniformly fill tensor with values from [l, u], then translate to
|
40 |
-
# [2l-1, 2u-1].
|
41 |
-
tensor.uniform_(2 * l - 1, 2 * u - 1)
|
42 |
-
|
43 |
-
# Use inverse cdf transform for normal distribution to get truncated
|
44 |
-
# standard normal
|
45 |
-
tensor.erfinv_()
|
46 |
-
|
47 |
-
# Transform to proper mean, std
|
48 |
-
tensor.mul_(std * math.sqrt(2.0))
|
49 |
-
tensor.add_(mean)
|
50 |
-
|
51 |
-
# Clamp to ensure it's in the proper range
|
52 |
-
tensor.clamp_(min=a, max=b)
|
53 |
-
return tensor
|
54 |
-
|
55 |
-
|
56 |
-
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
|
57 |
-
# type: (torch.Tensor, float, float, float, float) -> torch.Tensor
|
58 |
-
r"""Fills the input Tensor with values drawn from a truncated
|
59 |
-
normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean},
|
60 |
-
\text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for
|
61 |
-
generating the random values works best when :math:`a \leq \text{mean} \leq b`.
|
62 |
-
|
63 |
-
Args:
|
64 |
-
tensor: an n-dimensional `torch.Tensor`
|
65 |
-
mean: the mean of the normal distribution
|
66 |
-
std: the standard deviation of the normal distribution
|
67 |
-
a: the minimum cutoff value
|
68 |
-
b: the maximum cutoff value
|
69 |
-
Examples:
|
70 |
-
>>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w)
|
71 |
-
"""
|
72 |
-
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
|
73 |
-
|
74 |
-
|
75 |
-
class PatchEmbed(nn.Module):
|
76 |
-
"""2D Image to Patch Embedding"""
|
77 |
-
|
78 |
-
def __init__(
|
79 |
-
self,
|
80 |
-
height=224,
|
81 |
-
width=224,
|
82 |
-
patch_size=16,
|
83 |
-
in_channels=3,
|
84 |
-
embed_dim=768,
|
85 |
-
layer_norm=False,
|
86 |
-
flatten=True,
|
87 |
-
bias=True,
|
88 |
-
use_pos_embed=True,
|
89 |
-
):
|
90 |
-
super().__init__()
|
91 |
-
|
92 |
-
num_patches = (height // patch_size) * (width // patch_size)
|
93 |
-
self.flatten = flatten
|
94 |
-
self.layer_norm = layer_norm
|
95 |
-
|
96 |
-
self.proj = nn.Conv2d(
|
97 |
-
in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
|
98 |
-
)
|
99 |
-
if layer_norm:
|
100 |
-
self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6)
|
101 |
-
else:
|
102 |
-
self.norm = None
|
103 |
-
|
104 |
-
self.use_pos_embed = use_pos_embed
|
105 |
-
if self.use_pos_embed:
|
106 |
-
pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches**0.5))
|
107 |
-
self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False)
|
108 |
-
|
109 |
-
def forward(self, latent):
|
110 |
-
latent = self.proj(latent)
|
111 |
-
if self.flatten:
|
112 |
-
latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC
|
113 |
-
if self.layer_norm:
|
114 |
-
latent = self.norm(latent)
|
115 |
-
if self.use_pos_embed:
|
116 |
-
return latent + self.pos_embed
|
117 |
-
else:
|
118 |
-
return latent
|
119 |
-
|
120 |
-
|
121 |
-
class SkipBlock(nn.Module):
|
122 |
-
def __init__(self, dim: int):
|
123 |
-
super().__init__()
|
124 |
-
|
125 |
-
self.skip_linear = nn.Linear(2 * dim, dim)
|
126 |
-
|
127 |
-
# Use torch.nn.LayerNorm for now, following the original code
|
128 |
-
self.norm = nn.LayerNorm(dim)
|
129 |
-
|
130 |
-
def forward(self, x, skip):
|
131 |
-
x = self.skip_linear(torch.cat([x, skip], dim=-1))
|
132 |
-
x = self.norm(x)
|
133 |
-
|
134 |
-
return x
|
135 |
-
|
136 |
-
|
137 |
-
# Modified to support both pre-LayerNorm and post-LayerNorm configurations
|
138 |
-
# Don't support AdaLayerNormZero for now
|
139 |
-
# Modified from diffusers.models.attention.BasicTransformerBlock
|
140 |
-
class UTransformerBlock(nn.Module):
|
141 |
-
r"""
|
142 |
-
A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations.
|
143 |
-
|
144 |
-
Parameters:
|
145 |
-
dim (`int`): The number of channels in the input and output.
|
146 |
-
num_attention_heads (`int`): The number of heads to use for multi-head attention.
|
147 |
-
attention_head_dim (`int`): The number of channels in each head.
|
148 |
-
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
149 |
-
cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
|
150 |
-
activation_fn (`str`, *optional*, defaults to `"geglu"`):
|
151 |
-
Activation function to be used in feed-forward.
|
152 |
-
num_embeds_ada_norm (:obj: `int`, *optional*):
|
153 |
-
The number of diffusion steps used during training. See `Transformer2DModel`.
|
154 |
-
attention_bias (:obj: `bool`, *optional*, defaults to `False`):
|
155 |
-
Configure if the attentions should contain a bias parameter.
|
156 |
-
only_cross_attention (`bool`, *optional*):
|
157 |
-
Whether to use only cross-attention layers. In this case two cross attention layers are used.
|
158 |
-
double_self_attention (`bool`, *optional*):
|
159 |
-
Whether to use two self-attention layers. In this case no cross attention layers are used.
|
160 |
-
upcast_attention (`bool`, *optional*):
|
161 |
-
Whether to upcast the query and key to float32 when performing the attention calculation.
|
162 |
-
norm_elementwise_affine (`bool`, *optional*):
|
163 |
-
Whether to use learnable per-element affine parameters during layer normalization.
|
164 |
-
norm_type (`str`, defaults to `"layer_norm"`):
|
165 |
-
The layer norm implementation to use.
|
166 |
-
pre_layer_norm (`bool`, *optional*):
|
167 |
-
Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
|
168 |
-
as opposed to after ("post-LayerNorm"). Note that `BasicTransformerBlock` uses pre-LayerNorm, e.g.
|
169 |
-
`pre_layer_norm = True`.
|
170 |
-
final_dropout (`bool`, *optional*):
|
171 |
-
Whether to use a final Dropout layer after the feedforward network.
|
172 |
-
"""
|
173 |
-
|
174 |
-
def __init__(
|
175 |
-
self,
|
176 |
-
dim: int,
|
177 |
-
num_attention_heads: int,
|
178 |
-
attention_head_dim: int,
|
179 |
-
dropout=0.0,
|
180 |
-
cross_attention_dim: Optional[int] = None,
|
181 |
-
activation_fn: str = "geglu",
|
182 |
-
num_embeds_ada_norm: Optional[int] = None,
|
183 |
-
attention_bias: bool = False,
|
184 |
-
only_cross_attention: bool = False,
|
185 |
-
double_self_attention: bool = False,
|
186 |
-
upcast_attention: bool = False,
|
187 |
-
norm_elementwise_affine: bool = True,
|
188 |
-
norm_type: str = "layer_norm",
|
189 |
-
pre_layer_norm: bool = True,
|
190 |
-
final_dropout: bool = False,
|
191 |
-
):
|
192 |
-
super().__init__()
|
193 |
-
self.only_cross_attention = only_cross_attention
|
194 |
-
|
195 |
-
self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
|
196 |
-
|
197 |
-
self.pre_layer_norm = pre_layer_norm
|
198 |
-
|
199 |
-
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
|
200 |
-
raise ValueError(
|
201 |
-
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
|
202 |
-
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
|
203 |
-
)
|
204 |
-
|
205 |
-
# 1. Self-Attn
|
206 |
-
self.attn1 = Attention(
|
207 |
-
query_dim=dim,
|
208 |
-
heads=num_attention_heads,
|
209 |
-
dim_head=attention_head_dim,
|
210 |
-
dropout=dropout,
|
211 |
-
bias=attention_bias,
|
212 |
-
cross_attention_dim=cross_attention_dim if only_cross_attention else None,
|
213 |
-
upcast_attention=upcast_attention,
|
214 |
-
)
|
215 |
-
|
216 |
-
# 2. Cross-Attn
|
217 |
-
if cross_attention_dim is not None or double_self_attention:
|
218 |
-
self.attn2 = Attention(
|
219 |
-
query_dim=dim,
|
220 |
-
cross_attention_dim=cross_attention_dim if not double_self_attention else None,
|
221 |
-
heads=num_attention_heads,
|
222 |
-
dim_head=attention_head_dim,
|
223 |
-
dropout=dropout,
|
224 |
-
bias=attention_bias,
|
225 |
-
upcast_attention=upcast_attention,
|
226 |
-
) # is self-attn if encoder_hidden_states is none
|
227 |
-
else:
|
228 |
-
self.attn2 = None
|
229 |
-
|
230 |
-
if self.use_ada_layer_norm:
|
231 |
-
self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
|
232 |
-
else:
|
233 |
-
self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
|
234 |
-
|
235 |
-
if cross_attention_dim is not None or double_self_attention:
|
236 |
-
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
|
237 |
-
# I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
|
238 |
-
# the second cross attention block.
|
239 |
-
self.norm2 = (
|
240 |
-
AdaLayerNorm(dim, num_embeds_ada_norm)
|
241 |
-
if self.use_ada_layer_norm
|
242 |
-
else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
|
243 |
-
)
|
244 |
-
else:
|
245 |
-
self.norm2 = None
|
246 |
-
|
247 |
-
# 3. Feed-forward
|
248 |
-
self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
|
249 |
-
self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
|
250 |
-
|
251 |
-
def forward(
|
252 |
-
self,
|
253 |
-
hidden_states,
|
254 |
-
attention_mask=None,
|
255 |
-
encoder_hidden_states=None,
|
256 |
-
encoder_attention_mask=None,
|
257 |
-
timestep=None,
|
258 |
-
cross_attention_kwargs=None,
|
259 |
-
class_labels=None,
|
260 |
-
):
|
261 |
-
# Pre-LayerNorm
|
262 |
-
if self.pre_layer_norm:
|
263 |
-
if self.use_ada_layer_norm:
|
264 |
-
norm_hidden_states = self.norm1(hidden_states, timestep)
|
265 |
-
else:
|
266 |
-
norm_hidden_states = self.norm1(hidden_states)
|
267 |
-
else:
|
268 |
-
norm_hidden_states = hidden_states
|
269 |
-
|
270 |
-
# 1. Self-Attention
|
271 |
-
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
|
272 |
-
attn_output = self.attn1(
|
273 |
-
norm_hidden_states,
|
274 |
-
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
275 |
-
attention_mask=attention_mask,
|
276 |
-
**cross_attention_kwargs,
|
277 |
-
)
|
278 |
-
|
279 |
-
# Post-LayerNorm
|
280 |
-
if not self.pre_layer_norm:
|
281 |
-
if self.use_ada_layer_norm:
|
282 |
-
attn_output = self.norm1(attn_output, timestep)
|
283 |
-
else:
|
284 |
-
attn_output = self.norm1(attn_output)
|
285 |
-
|
286 |
-
hidden_states = attn_output + hidden_states
|
287 |
-
|
288 |
-
if self.attn2 is not None:
|
289 |
-
# Pre-LayerNorm
|
290 |
-
if self.pre_layer_norm:
|
291 |
-
norm_hidden_states = (
|
292 |
-
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
|
293 |
-
)
|
294 |
-
else:
|
295 |
-
norm_hidden_states = hidden_states
|
296 |
-
# TODO (Birch-San): Here we should prepare the encoder_attention mask correctly
|
297 |
-
# prepare attention mask here
|
298 |
-
|
299 |
-
# 2. Cross-Attention
|
300 |
-
attn_output = self.attn2(
|
301 |
-
norm_hidden_states,
|
302 |
-
encoder_hidden_states=encoder_hidden_states,
|
303 |
-
attention_mask=encoder_attention_mask,
|
304 |
-
**cross_attention_kwargs,
|
305 |
-
)
|
306 |
-
|
307 |
-
# Post-LayerNorm
|
308 |
-
if not self.pre_layer_norm:
|
309 |
-
attn_output = self.norm2(attn_output, timestep) if self.use_ada_layer_norm else self.norm2(attn_output)
|
310 |
-
|
311 |
-
hidden_states = attn_output + hidden_states
|
312 |
-
|
313 |
-
# 3. Feed-forward
|
314 |
-
# Pre-LayerNorm
|
315 |
-
if self.pre_layer_norm:
|
316 |
-
norm_hidden_states = self.norm3(hidden_states)
|
317 |
-
else:
|
318 |
-
norm_hidden_states = hidden_states
|
319 |
-
|
320 |
-
ff_output = self.ff(norm_hidden_states)
|
321 |
-
|
322 |
-
# Post-LayerNorm
|
323 |
-
if not self.pre_layer_norm:
|
324 |
-
ff_output = self.norm3(ff_output)
|
325 |
-
|
326 |
-
hidden_states = ff_output + hidden_states
|
327 |
-
|
328 |
-
return hidden_states
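# Illustrative sketch (not part of the original file): the pre-LayerNorm and
# post-LayerNorm branches in the forward above differ only in where the norm sits
# relative to the attention/feedforward branch. The hypothetical `toy_block` below
# shows the two residual updates side by side; `norm` and `sublayer` stand in for
# self.norm1/self.attn1 etc. and are assumptions for illustration.
import torch
import torch.nn as nn

def toy_block(x: torch.Tensor, norm: nn.LayerNorm, sublayer: nn.Module, pre_layer_norm: bool) -> torch.Tensor:
    if pre_layer_norm:
        # pre-LN: normalize the branch input, add the raw residual
        out = sublayer(norm(x))
    else:
        # post-LN: run the branch on the raw input, normalize the branch output
        out = norm(sublayer(x))
    return out + x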
|
329 |
-
|
330 |
-
|
331 |
-
# Like UTransformerBlock except with LayerNorms on the residual backbone of the block
|
332 |
-
# Modified from diffusers.models.attention.BasicTransformerBlock
|
333 |
-
class UniDiffuserBlock(nn.Module):
|
334 |
-
r"""
|
335 |
-
A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations and puts the
|
336 |
-
LayerNorms on the residual backbone of the block. This matches the transformer block in the [original UniDiffuser
|
337 |
-
implementation](https://github.com/thu-ml/unidiffuser/blob/main/libs/uvit_multi_post_ln_v1.py#L104).
|
338 |
-
|
339 |
-
Parameters:
|
340 |
-
dim (`int`): The number of channels in the input and output.
|
341 |
-
num_attention_heads (`int`): The number of heads to use for multi-head attention.
|
342 |
-
attention_head_dim (`int`): The number of channels in each head.
|
343 |
-
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
344 |
-
cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
|
345 |
-
activation_fn (`str`, *optional*, defaults to `"geglu"`):
|
346 |
-
Activation function to be used in feed-forward.
|
347 |
-
num_embeds_ada_norm (`int`, *optional*):
|
348 |
-
The number of diffusion steps used during training. See `Transformer2DModel`.
|
349 |
-
attention_bias (`bool`, *optional*, defaults to `False`):
|
350 |
-
Configure if the attentions should contain a bias parameter.
|
351 |
-
only_cross_attention (`bool`, *optional*):
|
352 |
-
Whether to use only cross-attention layers. In this case two cross attention layers are used.
|
353 |
-
double_self_attention (`bool`, *optional*):
|
354 |
-
Whether to use two self-attention layers. In this case no cross attention layers are used.
|
355 |
-
upcast_attention (`bool`, *optional*):
|
356 |
-
Whether to upcast the query and key to float32 when performing the attention calculation.
|
357 |
-
norm_elementwise_affine (`bool`, *optional*):
|
358 |
-
Whether to use learnable per-element affine parameters during layer normalization.
|
359 |
-
norm_type (`str`, defaults to `"layer_norm"`):
|
360 |
-
The layer norm implementation to use.
|
361 |
-
pre_layer_norm (`bool`, *optional*):
|
362 |
-
Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
|
363 |
-
as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
|
364 |
-
(`pre_layer_norm = False`).
|
365 |
-
final_dropout (`bool`, *optional*):
|
366 |
-
Whether to use a final Dropout layer after the feedforward network.
|
367 |
-
"""
|
368 |
-
|
369 |
-
def __init__(
|
370 |
-
self,
|
371 |
-
dim: int,
|
372 |
-
num_attention_heads: int,
|
373 |
-
attention_head_dim: int,
|
374 |
-
dropout=0.0,
|
375 |
-
cross_attention_dim: Optional[int] = None,
|
376 |
-
activation_fn: str = "geglu",
|
377 |
-
num_embeds_ada_norm: Optional[int] = None,
|
378 |
-
attention_bias: bool = False,
|
379 |
-
only_cross_attention: bool = False,
|
380 |
-
double_self_attention: bool = False,
|
381 |
-
upcast_attention: bool = False,
|
382 |
-
norm_elementwise_affine: bool = True,
|
383 |
-
norm_type: str = "layer_norm",
|
384 |
-
pre_layer_norm: bool = False,
|
385 |
-
final_dropout: bool = True,
|
386 |
-
):
|
387 |
-
super().__init__()
|
388 |
-
self.only_cross_attention = only_cross_attention
|
389 |
-
|
390 |
-
self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
|
391 |
-
|
392 |
-
self.pre_layer_norm = pre_layer_norm
|
393 |
-
|
394 |
-
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
|
395 |
-
raise ValueError(
|
396 |
-
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
|
397 |
-
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
|
398 |
-
)
|
399 |
-
|
400 |
-
# 1. Self-Attn
|
401 |
-
self.attn1 = Attention(
|
402 |
-
query_dim=dim,
|
403 |
-
heads=num_attention_heads,
|
404 |
-
dim_head=attention_head_dim,
|
405 |
-
dropout=dropout,
|
406 |
-
bias=attention_bias,
|
407 |
-
cross_attention_dim=cross_attention_dim if only_cross_attention else None,
|
408 |
-
upcast_attention=upcast_attention,
|
409 |
-
)
|
410 |
-
|
411 |
-
# 2. Cross-Attn
|
412 |
-
if cross_attention_dim is not None or double_self_attention:
|
413 |
-
self.attn2 = Attention(
|
414 |
-
query_dim=dim,
|
415 |
-
cross_attention_dim=cross_attention_dim if not double_self_attention else None,
|
416 |
-
heads=num_attention_heads,
|
417 |
-
dim_head=attention_head_dim,
|
418 |
-
dropout=dropout,
|
419 |
-
bias=attention_bias,
|
420 |
-
upcast_attention=upcast_attention,
|
421 |
-
) # is self-attn if encoder_hidden_states is none
|
422 |
-
else:
|
423 |
-
self.attn2 = None
|
424 |
-
|
425 |
-
if self.use_ada_layer_norm:
|
426 |
-
self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
|
427 |
-
else:
|
428 |
-
self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
|
429 |
-
|
430 |
-
if cross_attention_dim is not None or double_self_attention:
|
431 |
-
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
|
432 |
-
# I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
|
433 |
-
# the second cross attention block.
|
434 |
-
self.norm2 = (
|
435 |
-
AdaLayerNorm(dim, num_embeds_ada_norm)
|
436 |
-
if self.use_ada_layer_norm
|
437 |
-
else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
|
438 |
-
)
|
439 |
-
else:
|
440 |
-
self.norm2 = None
|
441 |
-
|
442 |
-
# 3. Feed-forward
|
443 |
-
self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
|
444 |
-
self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
|
445 |
-
|
446 |
-
def forward(
|
447 |
-
self,
|
448 |
-
hidden_states,
|
449 |
-
attention_mask=None,
|
450 |
-
encoder_hidden_states=None,
|
451 |
-
encoder_attention_mask=None,
|
452 |
-
timestep=None,
|
453 |
-
cross_attention_kwargs=None,
|
454 |
-
class_labels=None,
|
455 |
-
):
|
456 |
-
# Following the diffusers transformer block implementation, put the LayerNorm on the
|
457 |
-
# residual backbone
|
458 |
-
# Pre-LayerNorm
|
459 |
-
if self.pre_layer_norm:
|
460 |
-
if self.use_ada_layer_norm:
|
461 |
-
hidden_states = self.norm1(hidden_states, timestep)
|
462 |
-
else:
|
463 |
-
hidden_states = self.norm1(hidden_states)
|
464 |
-
|
465 |
-
# 1. Self-Attention
|
466 |
-
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
|
467 |
-
attn_output = self.attn1(
|
468 |
-
hidden_states,
|
469 |
-
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
470 |
-
attention_mask=attention_mask,
|
471 |
-
**cross_attention_kwargs,
|
472 |
-
)
|
473 |
-
|
474 |
-
hidden_states = attn_output + hidden_states
|
475 |
-
|
476 |
-
# Following the diffusers transformer block implementation, put the LayerNorm on the
|
477 |
-
# residual backbone
|
478 |
-
# Post-LayerNorm
|
479 |
-
if not self.pre_layer_norm:
|
480 |
-
if self.use_ada_layer_norm:
|
481 |
-
hidden_states = self.norm1(hidden_states, timestep)
|
482 |
-
else:
|
483 |
-
hidden_states = self.norm1(hidden_states)
|
484 |
-
|
485 |
-
if self.attn2 is not None:
|
486 |
-
# Pre-LayerNorm
|
487 |
-
if self.pre_layer_norm:
|
488 |
-
hidden_states = (
|
489 |
-
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
|
490 |
-
)
|
491 |
-
# TODO (Birch-San): Here we should prepare the encoder_attention mask correctly
|
492 |
-
# prepare attention mask here
|
493 |
-
|
494 |
-
# 2. Cross-Attention
|
495 |
-
attn_output = self.attn2(
|
496 |
-
hidden_states,
|
497 |
-
encoder_hidden_states=encoder_hidden_states,
|
498 |
-
attention_mask=encoder_attention_mask,
|
499 |
-
**cross_attention_kwargs,
|
500 |
-
)
|
501 |
-
|
502 |
-
hidden_states = attn_output + hidden_states
|
503 |
-
|
504 |
-
# Post-LayerNorm
|
505 |
-
if not self.pre_layer_norm:
|
506 |
-
hidden_states = (
|
507 |
-
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
|
508 |
-
)
|
509 |
-
|
510 |
-
# 3. Feed-forward
|
511 |
-
# Pre-LayerNorm
|
512 |
-
if self.pre_layer_norm:
|
513 |
-
hidden_states = self.norm3(hidden_states)
|
514 |
-
|
515 |
-
ff_output = self.ff(hidden_states)
|
516 |
-
|
517 |
-
hidden_states = ff_output + hidden_states
|
518 |
-
|
519 |
-
# Post-LayerNorm
|
520 |
-
if not self.pre_layer_norm:
|
521 |
-
hidden_states = self.norm3(hidden_states)
|
522 |
-
|
523 |
-
return hidden_states
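# Illustrative sketch (not part of the original file): in UniDiffuserBlock the
# LayerNorm acts on the residual backbone itself, so the skip path is normalized
# too, whereas UTransformerBlock above only normalizes inside the attention or
# feedforward branch. `f` is a placeholder for a branch sub-layer; the function
# name is an assumption for illustration.
import torch
import torch.nn as nn

def backbone_norm_step(x: torch.Tensor, norm: nn.LayerNorm, f: nn.Module, pre_layer_norm: bool) -> torch.Tensor:
    if pre_layer_norm:
        x = norm(x)          # normalize the backbone before the branch
        return f(x) + x      # the residual uses the *normalized* backbone
    out = f(x) + x           # residual on the raw backbone
    return norm(out)         # then normalize the backbone after the add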
|
524 |
-
|
525 |
-
|
526 |
-
# Modified from diffusers.models.transformer_2d.Transformer2DModel
|
527 |
-
# Modify the transformer block structure to be U-Net like following U-ViT
|
528 |
-
# Only supports patch-style input and torch.nn.LayerNorm currently
|
529 |
-
# https://github.com/baofff/U-ViT
|
530 |
-
class UTransformer2DModel(ModelMixin, ConfigMixin):
|
531 |
-
"""
|
532 |
-
Transformer model based on the [U-ViT](https://github.com/baofff/U-ViT) architecture for image-like data. Compared
|
533 |
-
to [`Transformer2DModel`], this model has skip connections between transformer blocks in a "U"-shaped fashion,
|
534 |
-
similar to a U-Net. Supports only continuous (actual embeddings) inputs, which are embedded via a [`PatchEmbed`]
|
535 |
-
layer and then reshaped to (b, t, d).
|
536 |
-
|
537 |
-
Parameters:
|
538 |
-
num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
|
539 |
-
attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
|
540 |
-
in_channels (`int`, *optional*):
|
541 |
-
Pass if the input is continuous. The number of channels in the input.
|
542 |
-
out_channels (`int`, *optional*):
|
543 |
-
The number of output channels; if `None`, defaults to `in_channels`.
|
544 |
-
num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
|
545 |
-
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
546 |
-
norm_num_groups (`int`, *optional*, defaults to `32`):
|
547 |
-
The number of groups to use when performing Group Normalization.
|
548 |
-
cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
|
549 |
-
attention_bias (`bool`, *optional*):
|
550 |
-
Configure if the TransformerBlocks' attention should contain a bias parameter.
|
551 |
-
sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
|
552 |
-
Note that this is fixed at training time as it is used for learning a number of position embeddings. See
|
553 |
-
`ImagePositionalEmbeddings`.
|
554 |
-
num_vector_embeds (`int`, *optional*):
|
555 |
-
Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
|
556 |
-
Includes the class for the masked latent pixel.
|
557 |
-
patch_size (`int`, *optional*, defaults to 2):
|
558 |
-
The patch size to use in the patch embedding.
|
559 |
-
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
|
560 |
-
num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
|
561 |
-
The number of diffusion steps used during training. Note that this is fixed at training time as it is used
|
562 |
-
to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
|
563 |
-
up to but not more than `num_embeds_ada_norm` steps.
|
564 |
-
use_linear_projection (`bool`, *optional*): TODO: Not used
|
565 |
-
only_cross_attention (`bool`, *optional*):
|
566 |
-
Whether to use only cross-attention layers. In this case two cross attention layers are used in each
|
567 |
-
transformer block.
|
568 |
-
upcast_attention (`bool`, *optional*):
|
569 |
-
Whether to upcast the query and key to float32 when performing the attention calculation.
|
570 |
-
norm_type (`str`, *optional*, defaults to `"layer_norm"`):
|
571 |
-
The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`.
|
572 |
-
block_type (`str`, *optional*, defaults to `"unidiffuser"`):
|
573 |
-
The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual
|
574 |
-
backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard
|
575 |
-
behavior in `diffusers`).
|
576 |
-
pre_layer_norm (`bool`, *optional*):
|
577 |
-
Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
|
578 |
-
as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
|
579 |
-
(`pre_layer_norm = False`).
|
580 |
-
norm_elementwise_affine (`bool`, *optional*):
|
581 |
-
Whether to use learnable per-element affine parameters during layer normalization.
|
582 |
-
use_patch_pos_embed (`bool`, *optional*):
|
583 |
-
Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`).
|
584 |
-
final_dropout (`bool`, *optional*):
|
585 |
-
Whether to use a final Dropout layer after the feedforward network.
|
586 |
-
"""
|
587 |
-
|
588 |
-
@register_to_config
|
589 |
-
def __init__(
|
590 |
-
self,
|
591 |
-
num_attention_heads: int = 16,
|
592 |
-
attention_head_dim: int = 88,
|
593 |
-
in_channels: Optional[int] = None,
|
594 |
-
out_channels: Optional[int] = None,
|
595 |
-
num_layers: int = 1,
|
596 |
-
dropout: float = 0.0,
|
597 |
-
norm_num_groups: int = 32,
|
598 |
-
cross_attention_dim: Optional[int] = None,
|
599 |
-
attention_bias: bool = False,
|
600 |
-
sample_size: Optional[int] = None,
|
601 |
-
num_vector_embeds: Optional[int] = None,
|
602 |
-
patch_size: Optional[int] = 2,
|
603 |
-
activation_fn: str = "geglu",
|
604 |
-
num_embeds_ada_norm: Optional[int] = None,
|
605 |
-
use_linear_projection: bool = False,
|
606 |
-
only_cross_attention: bool = False,
|
607 |
-
upcast_attention: bool = False,
|
608 |
-
norm_type: str = "layer_norm",
|
609 |
-
block_type: str = "unidiffuser",
|
610 |
-
pre_layer_norm: bool = False,
|
611 |
-
norm_elementwise_affine: bool = True,
|
612 |
-
use_patch_pos_embed=False,
|
613 |
-
ff_final_dropout: bool = False,
|
614 |
-
):
|
615 |
-
super().__init__()
|
616 |
-
self.use_linear_projection = use_linear_projection
|
617 |
-
self.num_attention_heads = num_attention_heads
|
618 |
-
self.attention_head_dim = attention_head_dim
|
619 |
-
inner_dim = num_attention_heads * attention_head_dim
|
620 |
-
|
621 |
-
# 1. Input
|
622 |
-
# Only support patch input of shape (batch_size, num_channels, height, width) for now
|
623 |
-
assert in_channels is not None and patch_size is not None, "Patch input requires in_channels and patch_size."
|
624 |
-
|
625 |
-
assert sample_size is not None, "UTransformer2DModel over patched input must provide sample_size"
|
626 |
-
|
627 |
-
# 2. Define input layers
|
628 |
-
self.height = sample_size
|
629 |
-
self.width = sample_size
|
630 |
-
|
631 |
-
self.patch_size = patch_size
|
632 |
-
self.pos_embed = PatchEmbed(
|
633 |
-
height=sample_size,
|
634 |
-
width=sample_size,
|
635 |
-
patch_size=patch_size,
|
636 |
-
in_channels=in_channels,
|
637 |
-
embed_dim=inner_dim,
|
638 |
-
use_pos_embed=use_patch_pos_embed,
|
639 |
-
)
|
640 |
-
|
641 |
-
# 3. Define transformers blocks
|
642 |
-
# Modify this to have in_blocks ("downsample" blocks, even though we don't actually downsample), a mid_block,
|
643 |
-
# and out_blocks ("upsample" blocks). Like a U-Net, there are skip connections from in_blocks to out_blocks in
|
644 |
-
# a "U"-shaped fashion (e.g. first in_block to last out_block, etc.).
|
645 |
-
# Quick hack to make the transformer block type configurable
|
646 |
-
if block_type == "unidiffuser":
|
647 |
-
block_cls = UniDiffuserBlock
|
648 |
-
else:
|
649 |
-
block_cls = UTransformerBlock
|
650 |
-
self.transformer_in_blocks = nn.ModuleList(
|
651 |
-
[
|
652 |
-
block_cls(
|
653 |
-
inner_dim,
|
654 |
-
num_attention_heads,
|
655 |
-
attention_head_dim,
|
656 |
-
dropout=dropout,
|
657 |
-
cross_attention_dim=cross_attention_dim,
|
658 |
-
activation_fn=activation_fn,
|
659 |
-
num_embeds_ada_norm=num_embeds_ada_norm,
|
660 |
-
attention_bias=attention_bias,
|
661 |
-
only_cross_attention=only_cross_attention,
|
662 |
-
upcast_attention=upcast_attention,
|
663 |
-
norm_type=norm_type,
|
664 |
-
pre_layer_norm=pre_layer_norm,
|
665 |
-
norm_elementwise_affine=norm_elementwise_affine,
|
666 |
-
final_dropout=ff_final_dropout,
|
667 |
-
)
|
668 |
-
for d in range(num_layers // 2)
|
669 |
-
]
|
670 |
-
)
|
671 |
-
|
672 |
-
self.transformer_mid_block = block_cls(
|
673 |
-
inner_dim,
|
674 |
-
num_attention_heads,
|
675 |
-
attention_head_dim,
|
676 |
-
dropout=dropout,
|
677 |
-
cross_attention_dim=cross_attention_dim,
|
678 |
-
activation_fn=activation_fn,
|
679 |
-
num_embeds_ada_norm=num_embeds_ada_norm,
|
680 |
-
attention_bias=attention_bias,
|
681 |
-
only_cross_attention=only_cross_attention,
|
682 |
-
upcast_attention=upcast_attention,
|
683 |
-
norm_type=norm_type,
|
684 |
-
pre_layer_norm=pre_layer_norm,
|
685 |
-
norm_elementwise_affine=norm_elementwise_affine,
|
686 |
-
final_dropout=ff_final_dropout,
|
687 |
-
)
|
688 |
-
|
689 |
-
# For each skip connection, we use a SkipBlock (concatenation + Linear + LayerNorm) to process the inputs
|
690 |
-
# before each transformer out_block.
|
691 |
-
self.transformer_out_blocks = nn.ModuleList(
|
692 |
-
[
|
693 |
-
nn.ModuleDict(
|
694 |
-
{
|
695 |
-
"skip": SkipBlock(
|
696 |
-
inner_dim,
|
697 |
-
),
|
698 |
-
"block": block_cls(
|
699 |
-
inner_dim,
|
700 |
-
num_attention_heads,
|
701 |
-
attention_head_dim,
|
702 |
-
dropout=dropout,
|
703 |
-
cross_attention_dim=cross_attention_dim,
|
704 |
-
activation_fn=activation_fn,
|
705 |
-
num_embeds_ada_norm=num_embeds_ada_norm,
|
706 |
-
attention_bias=attention_bias,
|
707 |
-
only_cross_attention=only_cross_attention,
|
708 |
-
upcast_attention=upcast_attention,
|
709 |
-
norm_type=norm_type,
|
710 |
-
pre_layer_norm=pre_layer_norm,
|
711 |
-
norm_elementwise_affine=norm_elementwise_affine,
|
712 |
-
final_dropout=ff_final_dropout,
|
713 |
-
),
|
714 |
-
}
|
715 |
-
)
|
716 |
-
for d in range(num_layers // 2)
|
717 |
-
]
|
718 |
-
)
|
719 |
-
|
720 |
-
# 4. Define output layers
|
721 |
-
self.out_channels = in_channels if out_channels is None else out_channels
|
722 |
-
|
723 |
-
# Following the UniDiffuser U-ViT implementation, we process the transformer output with
|
724 |
-
# a LayerNorm layer with per-element affine params
|
725 |
-
self.norm_out = nn.LayerNorm(inner_dim)
|
726 |
-
|
727 |
-
def forward(
|
728 |
-
self,
|
729 |
-
hidden_states,
|
730 |
-
encoder_hidden_states=None,
|
731 |
-
timestep=None,
|
732 |
-
class_labels=None,
|
733 |
-
cross_attention_kwargs=None,
|
734 |
-
return_dict: bool = True,
|
735 |
-
hidden_states_is_embedding: bool = False,
|
736 |
-
unpatchify: bool = True,
|
737 |
-
):
|
738 |
-
"""
|
739 |
-
Args:
|
740 |
-
hidden_states (When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.
|
741 |
-
When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input
|
742 |
-
hidden_states
|
743 |
-
encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
|
744 |
-
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
|
745 |
-
self-attention.
|
746 |
-
timestep ( `torch.long`, *optional*):
|
747 |
-
Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
|
748 |
-
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
|
749 |
-
Optional class labels to be applied as an embedding in AdaLayerNormZero. Used to indicate class labels
|
750 |
-
conditioning.
|
751 |
-
cross_attention_kwargs (*optional*):
|
752 |
-
Keyword arguments to supply to the cross attention layers, if used.
|
753 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
754 |
-
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
|
755 |
-
hidden_states_is_embedding (`bool`, *optional*, defaults to `False`):
|
756 |
-
Whether or not hidden_states is an embedding directly usable by the transformer. In this case we will
|
757 |
-
ignore input handling (e.g. continuous, vectorized, etc.) and directly feed hidden_states into the
|
758 |
-
transformer blocks.
|
759 |
-
unpatchify (`bool`, *optional*, defaults to `True`):
|
760 |
-
Whether to unpatchify the transformer output.
|
761 |
-
|
762 |
-
Returns:
|
763 |
-
[`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
|
764 |
-
[`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
|
765 |
-
returning a tuple, the first element is the sample tensor.
|
766 |
-
"""
|
767 |
-
# 0. Check inputs
|
768 |
-
|
769 |
-
if not unpatchify and return_dict:
|
770 |
-
raise ValueError(
|
771 |
-
f"Cannot both define `unpatchify`: {unpatchify} and `return_dict`: {return_dict} since when"
|
772 |
-
f" `unpatchify` is {unpatchify} the returned output is of shape (batch_size, seq_len, hidden_dim)"
|
773 |
-
" rather than (batch_size, num_channels, height, width)."
|
774 |
-
)
|
775 |
-
|
776 |
-
# 1. Input
|
777 |
-
if not hidden_states_is_embedding:
|
778 |
-
hidden_states = self.pos_embed(hidden_states)
|
779 |
-
|
780 |
-
# 2. Blocks
|
781 |
-
|
782 |
-
# In ("downsample") blocks
|
783 |
-
skips = []
|
784 |
-
for in_block in self.transformer_in_blocks:
|
785 |
-
hidden_states = in_block(
|
786 |
-
hidden_states,
|
787 |
-
encoder_hidden_states=encoder_hidden_states,
|
788 |
-
timestep=timestep,
|
789 |
-
cross_attention_kwargs=cross_attention_kwargs,
|
790 |
-
class_labels=class_labels,
|
791 |
-
)
|
792 |
-
skips.append(hidden_states)
|
793 |
-
|
794 |
-
# Mid block
|
795 |
-
hidden_states = self.transformer_mid_block(hidden_states)
|
796 |
-
|
797 |
-
# Out ("upsample") blocks
|
798 |
-
for out_block in self.transformer_out_blocks:
|
799 |
-
hidden_states = out_block["skip"](hidden_states, skips.pop())
|
800 |
-
hidden_states = out_block["block"](
|
801 |
-
hidden_states,
|
802 |
-
encoder_hidden_states=encoder_hidden_states,
|
803 |
-
timestep=timestep,
|
804 |
-
cross_attention_kwargs=cross_attention_kwargs,
|
805 |
-
class_labels=class_labels,
|
806 |
-
)
|
807 |
-
|
808 |
-
# 3. Output
|
809 |
-
# Don't support AdaLayerNorm for now, so no conditioning/scale/shift logic
|
810 |
-
hidden_states = self.norm_out(hidden_states)
|
811 |
-
# hidden_states = self.proj_out(hidden_states)
|
812 |
-
|
813 |
-
if unpatchify:
|
814 |
-
# unpatchify
|
815 |
-
height = width = int(hidden_states.shape[1] ** 0.5)
|
816 |
-
hidden_states = hidden_states.reshape(
|
817 |
-
shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
|
818 |
-
)
|
819 |
-
hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
|
820 |
-
output = hidden_states.reshape(
|
821 |
-
shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
|
822 |
-
)
|
823 |
-
else:
|
824 |
-
output = hidden_states
|
825 |
-
|
826 |
-
if not return_dict:
|
827 |
-
return (output,)
|
828 |
-
|
829 |
-
return Transformer2DModelOutput(sample=output)
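# Illustrative sketch (not part of the original file) of the U-ViT-style wiring used
# in UTransformer2DModel.forward above: outputs of the "down" half are pushed onto a
# stack and popped in reverse order, so the first in_block feeds the last out_block.
# `SkipCat` is a simplified stand-in for SkipBlock (concatenation + Linear) and the
# separate `skip_layers` list is an assumption; the real model keeps the skip module
# and the block together in a ModuleDict.
import torch
import torch.nn as nn

class SkipCat(nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.proj = nn.Linear(2 * dim, dim)

    def forward(self, x: torch.Tensor, skip: torch.Tensor) -> torch.Tensor:
        return self.proj(torch.cat([x, skip], dim=-1))

def u_shaped_pass(x, in_blocks, mid_block, out_blocks, skip_layers):
    skips = []
    for blk in in_blocks:                 # "downsample" half
        x = blk(x)
        skips.append(x)
    x = mid_block(x)
    for skip_layer, blk in zip(skip_layers, out_blocks):  # "upsample" half
        x = skip_layer(x, skips.pop())    # last-in, first-out pairing
        x = blk(x)
    return x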
|
830 |
-
|
831 |
-
|
832 |
-
class UniDiffuserModel(ModelMixin, ConfigMixin):
|
833 |
-
"""
|
834 |
-
Transformer model for an image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is a
|
835 |
-
modification of [`UTransformer2DModel`] with input and output heads for the VAE-embedded latent image, the
|
836 |
-
CLIP-embedded image, and the CLIP-embedded prompt (see paper for more details).
|
837 |
-
|
838 |
-
Parameters:
|
839 |
-
text_dim (`int`): The hidden dimension of the CLIP text model used to embed prompts.
|
840 |
-
clip_img_dim (`int`): The hidden dimension of the CLIP vision model used to embed images.
|
841 |
-
num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
|
842 |
-
attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
|
843 |
-
in_channels (`int`, *optional*):
|
844 |
-
Pass if the input is continuous. The number of channels in the input.
|
845 |
-
out_channels (`int`, *optional*):
|
846 |
-
The number of output channels; if `None`, defaults to `in_channels`.
|
847 |
-
num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
|
848 |
-
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
849 |
-
norm_num_groups (`int`, *optional*, defaults to `32`):
|
850 |
-
The number of groups to use when performing Group Normalization.
|
851 |
-
cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
|
852 |
-
attention_bias (`bool`, *optional*):
|
853 |
-
Configure if the TransformerBlocks' attention should contain a bias parameter.
|
854 |
-
sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
|
855 |
-
Note that this is fixed at training time as it is used for learning a number of position embeddings. See
|
856 |
-
`ImagePositionalEmbeddings`.
|
857 |
-
num_vector_embeds (`int`, *optional*):
|
858 |
-
Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
|
859 |
-
Includes the class for the masked latent pixel.
|
860 |
-
patch_size (`int`, *optional*, defaults to 2):
|
861 |
-
The patch size to use in the patch embedding.
|
862 |
-
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
|
863 |
-
num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
|
864 |
-
The number of diffusion steps used during training. Note that this is fixed at training time as it is used
|
865 |
-
to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
|
866 |
-
up to but not more than `num_embeds_ada_norm` steps.
|
867 |
-
use_linear_projection (`bool`, *optional*): TODO: Not used
|
868 |
-
only_cross_attention (`bool`, *optional*):
|
869 |
-
Whether to use only cross-attention layers. In this case two cross attention layers are used in each
|
870 |
-
transformer block.
|
871 |
-
upcast_attention (`bool`, *optional*):
|
872 |
-
Whether to upcast the query and key to float32 when performing the attention calculation.
|
873 |
-
norm_type (`str`, *optional*, defaults to `"layer_norm"`):
|
874 |
-
The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`.
|
875 |
-
block_type (`str`, *optional*, defaults to `"unidiffuser"`):
|
876 |
-
The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual
|
877 |
-
backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard
|
878 |
-
behavior in `diffusers`).
|
879 |
-
pre_layer_norm (`bool`, *optional*):
|
880 |
-
Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
|
881 |
-
as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
|
882 |
-
(`pre_layer_norm = False`).
|
883 |
-
norm_elementwise_affine (`bool`, *optional*):
|
884 |
-
Whether to use learnable per-element affine parameters during layer normalization.
|
885 |
-
use_patch_pos_embed (`bool`, *optional*):
|
886 |
-
Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`).
|
887 |
-
ff_final_dropout (`bool`, *optional*):
|
888 |
-
Whether to use a final Dropout layer after the feedforward network.
|
889 |
-
use_data_type_embedding (`bool`, *optional*):
|
890 |
-
Whether to use a data type embedding. This is only relevant for UniDiffuser-v1 style models; UniDiffuser-v1
|
891 |
-
is trained further from UniDiffuser-v0 on non-publicly-available data and accepts a `data_type`
|
892 |
-
argument, which can either be `1` to use the weights trained on non-publicly-available data or `0`
|
893 |
-
otherwise. This argument is subsequently embedded by the data type embedding, if used.
|
894 |
-
"""
|
895 |
-
|
896 |
-
@register_to_config
|
897 |
-
def __init__(
|
898 |
-
self,
|
899 |
-
text_dim: int = 768,
|
900 |
-
clip_img_dim: int = 512,
|
901 |
-
num_text_tokens: int = 77,
|
902 |
-
num_attention_heads: int = 16,
|
903 |
-
attention_head_dim: int = 88,
|
904 |
-
in_channels: Optional[int] = None,
|
905 |
-
out_channels: Optional[int] = None,
|
906 |
-
num_layers: int = 1,
|
907 |
-
dropout: float = 0.0,
|
908 |
-
norm_num_groups: int = 32,
|
909 |
-
cross_attention_dim: Optional[int] = None,
|
910 |
-
attention_bias: bool = False,
|
911 |
-
sample_size: Optional[int] = None,
|
912 |
-
num_vector_embeds: Optional[int] = None,
|
913 |
-
patch_size: Optional[int] = None,
|
914 |
-
activation_fn: str = "geglu",
|
915 |
-
num_embeds_ada_norm: Optional[int] = None,
|
916 |
-
use_linear_projection: bool = False,
|
917 |
-
only_cross_attention: bool = False,
|
918 |
-
upcast_attention: bool = False,
|
919 |
-
norm_type: str = "layer_norm",
|
920 |
-
block_type: str = "unidiffuser",
|
921 |
-
pre_layer_norm: bool = False,
|
922 |
-
use_timestep_embedding=False,
|
923 |
-
norm_elementwise_affine: bool = True,
|
924 |
-
use_patch_pos_embed=False,
|
925 |
-
ff_final_dropout: bool = True,
|
926 |
-
use_data_type_embedding: bool = False,
|
927 |
-
):
|
928 |
-
super().__init__()
|
929 |
-
|
930 |
-
# 0. Handle dimensions
|
931 |
-
self.inner_dim = num_attention_heads * attention_head_dim
|
932 |
-
|
933 |
-
assert sample_size is not None, "UniDiffuserModel over patched input must provide sample_size"
|
934 |
-
self.sample_size = sample_size
|
935 |
-
self.in_channels = in_channels
|
936 |
-
self.out_channels = in_channels if out_channels is None else out_channels
|
937 |
-
|
938 |
-
self.patch_size = patch_size
|
939 |
-
# Assume image is square...
|
940 |
-
self.num_patches = (self.sample_size // patch_size) * (self.sample_size // patch_size)
|
941 |
-
|
942 |
-
# 1. Define input layers
|
943 |
-
# 1.1 Input layers for text and image input
|
944 |
-
# For now, only support patch input for VAE latent image input
|
945 |
-
self.vae_img_in = PatchEmbed(
|
946 |
-
height=sample_size,
|
947 |
-
width=sample_size,
|
948 |
-
patch_size=patch_size,
|
949 |
-
in_channels=in_channels,
|
950 |
-
embed_dim=self.inner_dim,
|
951 |
-
use_pos_embed=use_patch_pos_embed,
|
952 |
-
)
|
953 |
-
self.clip_img_in = nn.Linear(clip_img_dim, self.inner_dim)
|
954 |
-
self.text_in = nn.Linear(text_dim, self.inner_dim)
|
955 |
-
|
956 |
-
# 1.2. Timestep embeddings for t_img, t_text
|
957 |
-
self.timestep_img_proj = Timesteps(
|
958 |
-
self.inner_dim,
|
959 |
-
flip_sin_to_cos=True,
|
960 |
-
downscale_freq_shift=0,
|
961 |
-
)
|
962 |
-
self.timestep_img_embed = (
|
963 |
-
TimestepEmbedding(
|
964 |
-
self.inner_dim,
|
965 |
-
4 * self.inner_dim,
|
966 |
-
out_dim=self.inner_dim,
|
967 |
-
)
|
968 |
-
if use_timestep_embedding
|
969 |
-
else nn.Identity()
|
970 |
-
)
|
971 |
-
|
972 |
-
self.timestep_text_proj = Timesteps(
|
973 |
-
self.inner_dim,
|
974 |
-
flip_sin_to_cos=True,
|
975 |
-
downscale_freq_shift=0,
|
976 |
-
)
|
977 |
-
self.timestep_text_embed = (
|
978 |
-
TimestepEmbedding(
|
979 |
-
self.inner_dim,
|
980 |
-
4 * self.inner_dim,
|
981 |
-
out_dim=self.inner_dim,
|
982 |
-
)
|
983 |
-
if use_timestep_embedding
|
984 |
-
else nn.Identity()
|
985 |
-
)
|
986 |
-
|
987 |
-
# 1.3. Positional embedding
|
988 |
-
self.num_text_tokens = num_text_tokens
|
989 |
-
self.num_tokens = 1 + 1 + num_text_tokens + 1 + self.num_patches
|
990 |
-
self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, self.inner_dim))
|
991 |
-
self.pos_embed_drop = nn.Dropout(p=dropout)
|
992 |
-
trunc_normal_(self.pos_embed, std=0.02)
|
993 |
-
|
994 |
-
# 1.4. Handle data type token embeddings for UniDiffuser-V1, if necessary
|
995 |
-
self.use_data_type_embedding = use_data_type_embedding
|
996 |
-
if self.use_data_type_embedding:
|
997 |
-
self.data_type_token_embedding = nn.Embedding(2, self.inner_dim)
|
998 |
-
self.data_type_pos_embed_token = nn.Parameter(torch.zeros(1, 1, self.inner_dim))
|
999 |
-
|
1000 |
-
# 2. Define transformer blocks
|
1001 |
-
self.transformer = UTransformer2DModel(
|
1002 |
-
num_attention_heads=num_attention_heads,
|
1003 |
-
attention_head_dim=attention_head_dim,
|
1004 |
-
in_channels=in_channels,
|
1005 |
-
out_channels=out_channels,
|
1006 |
-
num_layers=num_layers,
|
1007 |
-
dropout=dropout,
|
1008 |
-
norm_num_groups=norm_num_groups,
|
1009 |
-
cross_attention_dim=cross_attention_dim,
|
1010 |
-
attention_bias=attention_bias,
|
1011 |
-
sample_size=sample_size,
|
1012 |
-
num_vector_embeds=num_vector_embeds,
|
1013 |
-
patch_size=patch_size,
|
1014 |
-
activation_fn=activation_fn,
|
1015 |
-
num_embeds_ada_norm=num_embeds_ada_norm,
|
1016 |
-
use_linear_projection=use_linear_projection,
|
1017 |
-
only_cross_attention=only_cross_attention,
|
1018 |
-
upcast_attention=upcast_attention,
|
1019 |
-
norm_type=norm_type,
|
1020 |
-
block_type=block_type,
|
1021 |
-
pre_layer_norm=pre_layer_norm,
|
1022 |
-
norm_elementwise_affine=norm_elementwise_affine,
|
1023 |
-
use_patch_pos_embed=use_patch_pos_embed,
|
1024 |
-
ff_final_dropout=ff_final_dropout,
|
1025 |
-
)
|
1026 |
-
|
1027 |
-
# 3. Define output layers
|
1028 |
-
patch_dim = (patch_size**2) * out_channels
|
1029 |
-
self.vae_img_out = nn.Linear(self.inner_dim, patch_dim)
|
1030 |
-
self.clip_img_out = nn.Linear(self.inner_dim, clip_img_dim)
|
1031 |
-
self.text_out = nn.Linear(self.inner_dim, text_dim)
|
1032 |
-
|
1033 |
-
@torch.jit.ignore
|
1034 |
-
def no_weight_decay(self):
|
1035 |
-
return {"pos_embed"}
|
1036 |
-
|
1037 |
-
def forward(
|
1038 |
-
self,
|
1039 |
-
latent_image_embeds: torch.FloatTensor,
|
1040 |
-
image_embeds: torch.FloatTensor,
|
1041 |
-
prompt_embeds: torch.FloatTensor,
|
1042 |
-
timestep_img: Union[torch.Tensor, float, int],
|
1043 |
-
timestep_text: Union[torch.Tensor, float, int],
|
1044 |
-
data_type: Optional[Union[torch.Tensor, float, int]] = 1,
|
1045 |
-
encoder_hidden_states=None,
|
1046 |
-
cross_attention_kwargs=None,
|
1047 |
-
):
|
1048 |
-
"""
|
1049 |
-
Args:
|
1050 |
-
latent_image_embeds (`torch.FloatTensor` of shape `(batch size, latent channels, height, width)`):
|
1051 |
-
Latent image representation from the VAE encoder.
|
1052 |
-
image_embeds (`torch.FloatTensor` of shape `(batch size, 1, clip_img_dim)`):
|
1053 |
-
CLIP-embedded image representation (unsqueezed in the first dimension).
|
1054 |
-
prompt_embeds (`torch.FloatTensor` of shape `(batch size, seq_len, text_dim)`):
|
1055 |
-
CLIP-embedded text representation.
|
1056 |
-
timestep_img (`torch.long` or `float` or `int`):
|
1057 |
-
Current denoising step for the image.
|
1058 |
-
timestep_text (`torch.long` or `float` or `int`):
|
1059 |
-
Current denoising step for the text.
|
1060 |
-
data_type (`torch.int` or `float` or `int`, *optional*, defaults to `1`):
|
1061 |
-
Only used in UniDiffuser-v1-style models. Can be either `1`, to use weights trained on nonpublic data,
|
1062 |
-
or `0` otherwise.
|
1063 |
-
encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
|
1064 |
-
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
|
1065 |
-
self-attention.
|
1066 |
-
cross_attention_kwargs (*optional*):
|
1067 |
-
Keyword arguments to supply to the cross attention layers, if used.
|
1068 |
-
|
1069 |
-
|
1070 |
-
Returns:
|
1071 |
-
`tuple`: Returns relevant parts of the model's noise prediction: the first element of the tuple is the VAE
|
1072 |
-
image embedding, the second element is the CLIP image embedding, and the third element is the CLIP text
|
1073 |
-
embedding.
|
1074 |
-
"""
|
1075 |
-
batch_size = latent_image_embeds.shape[0]
|
1076 |
-
|
1077 |
-
# 1. Input
|
1078 |
-
# 1.1. Map inputs to shape (B, N, inner_dim)
|
1079 |
-
vae_hidden_states = self.vae_img_in(latent_image_embeds)
|
1080 |
-
clip_hidden_states = self.clip_img_in(image_embeds)
|
1081 |
-
text_hidden_states = self.text_in(prompt_embeds)
|
1082 |
-
|
1083 |
-
num_text_tokens, num_img_tokens = text_hidden_states.size(1), vae_hidden_states.size(1)
|
1084 |
-
|
1085 |
-
# 1.2. Encode image timesteps to single token (B, 1, inner_dim)
|
1086 |
-
if not torch.is_tensor(timestep_img):
|
1087 |
-
timestep_img = torch.tensor([timestep_img], dtype=torch.long, device=vae_hidden_states.device)
|
1088 |
-
|
1089 |
-
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
|
1090 |
-
timestep_img = timestep_img * torch.ones(batch_size, dtype=timestep_img.dtype, device=timestep_img.device)
|
1091 |
-
|
1092 |
-
timestep_img_token = self.timestep_img_proj(timestep_img)
|
1093 |
-
# t_img_token does not contain any weights and will always return f32 tensors
|
1094 |
-
# but time_embedding might be fp16, so we need to cast here.
|
1095 |
-
timestep_img_token = timestep_img_token.to(dtype=self.dtype)
|
1096 |
-
timestep_img_token = self.timestep_img_embed(timestep_img_token)
|
1097 |
-
timestep_img_token = timestep_img_token.unsqueeze(dim=1)
|
1098 |
-
|
1099 |
-
# 1.3. Encode text timesteps to single token (B, 1, inner_dim)
|
1100 |
-
if not torch.is_tensor(timestep_text):
|
1101 |
-
timestep_text = torch.tensor([timestep_text], dtype=torch.long, device=vae_hidden_states.device)
|
1102 |
-
|
1103 |
-
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
|
1104 |
-
timestep_text = timestep_text * torch.ones(batch_size, dtype=timestep_text.dtype, device=timestep_text.device)
|
1105 |
-
|
1106 |
-
timestep_text_token = self.timestep_text_proj(timestep_text)
|
1107 |
-
# t_text_token does not contain any weights and will always return f32 tensors
|
1108 |
-
# but time_embedding might be fp16, so we need to cast here.
|
1109 |
-
timestep_text_token = timestep_text_token.to(dtype=self.dtype)
|
1110 |
-
timestep_text_token = self.timestep_text_embed(timestep_text_token)
|
1111 |
-
timestep_text_token = timestep_text_token.unsqueeze(dim=1)
|
1112 |
-
|
1113 |
-
# 1.4. Concatenate all of the embeddings together.
|
1114 |
-
if self.use_data_type_embedding:
|
1115 |
-
assert data_type is not None, "data_type must be supplied if the model uses a data type embedding"
|
1116 |
-
if not torch.is_tensor(data_type):
|
1117 |
-
data_type = torch.tensor([data_type], dtype=torch.int, device=vae_hidden_states.device)
|
1118 |
-
|
1119 |
-
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
|
1120 |
-
data_type = data_type * torch.ones(batch_size, dtype=data_type.dtype, device=data_type.device)
|
1121 |
-
|
1122 |
-
data_type_token = self.data_type_token_embedding(data_type).unsqueeze(dim=1)
|
1123 |
-
hidden_states = torch.cat(
|
1124 |
-
[
|
1125 |
-
timestep_img_token,
|
1126 |
-
timestep_text_token,
|
1127 |
-
data_type_token,
|
1128 |
-
text_hidden_states,
|
1129 |
-
clip_hidden_states,
|
1130 |
-
vae_hidden_states,
|
1131 |
-
],
|
1132 |
-
dim=1,
|
1133 |
-
)
|
1134 |
-
else:
|
1135 |
-
hidden_states = torch.cat(
|
1136 |
-
[timestep_img_token, timestep_text_token, text_hidden_states, clip_hidden_states, vae_hidden_states],
|
1137 |
-
dim=1,
|
1138 |
-
)
|
1139 |
-
|
1140 |
-
# 1.5. Prepare the positional embeddings and add to hidden states
|
1141 |
-
# Note: I think img_vae should always have the proper shape, so there's no need to interpolate
|
1142 |
-
# the position embeddings.
|
1143 |
-
if self.use_data_type_embedding:
|
1144 |
-
pos_embed = torch.cat(
|
1145 |
-
[self.pos_embed[:, : 1 + 1, :], self.data_type_pos_embed_token, self.pos_embed[:, 1 + 1 :, :]], dim=1
|
1146 |
-
)
|
1147 |
-
else:
|
1148 |
-
pos_embed = self.pos_embed
|
1149 |
-
hidden_states = hidden_states + pos_embed
|
1150 |
-
hidden_states = self.pos_embed_drop(hidden_states)
|
1151 |
-
|
1152 |
-
# 2. Blocks
|
1153 |
-
hidden_states = self.transformer(
|
1154 |
-
hidden_states,
|
1155 |
-
encoder_hidden_states=encoder_hidden_states,
|
1156 |
-
timestep=None,
|
1157 |
-
class_labels=None,
|
1158 |
-
cross_attention_kwargs=cross_attention_kwargs,
|
1159 |
-
return_dict=False,
|
1160 |
-
hidden_states_is_embedding=True,
|
1161 |
-
unpatchify=False,
|
1162 |
-
)[0]
|
1163 |
-
|
1164 |
-
# 3. Output
|
1165 |
-
# Split out the predicted noise representation.
|
1166 |
-
if self.use_data_type_embedding:
|
1167 |
-
(
|
1168 |
-
t_img_token_out,
|
1169 |
-
t_text_token_out,
|
1170 |
-
data_type_token_out,
|
1171 |
-
text_out,
|
1172 |
-
img_clip_out,
|
1173 |
-
img_vae_out,
|
1174 |
-
) = hidden_states.split((1, 1, 1, num_text_tokens, 1, num_img_tokens), dim=1)
|
1175 |
-
else:
|
1176 |
-
t_img_token_out, t_text_token_out, text_out, img_clip_out, img_vae_out = hidden_states.split(
|
1177 |
-
(1, 1, num_text_tokens, 1, num_img_tokens), dim=1
|
1178 |
-
)
|
1179 |
-
|
1180 |
-
img_vae_out = self.vae_img_out(img_vae_out)
|
1181 |
-
|
1182 |
-
# unpatchify
|
1183 |
-
height = width = int(img_vae_out.shape[1] ** 0.5)
|
1184 |
-
img_vae_out = img_vae_out.reshape(
|
1185 |
-
shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
|
1186 |
-
)
|
1187 |
-
img_vae_out = torch.einsum("nhwpqc->nchpwq", img_vae_out)
|
1188 |
-
img_vae_out = img_vae_out.reshape(
|
1189 |
-
shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
|
1190 |
-
)
|
1191 |
-
|
1192 |
-
img_clip_out = self.clip_img_out(img_clip_out)
|
1193 |
-
|
1194 |
-
text_out = self.text_out(text_out)
|
1195 |
-
|
1196 |
-
return img_vae_out, img_clip_out, text_out
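# Illustrative sketch (not part of the original file) of the token layout that
# UniDiffuserModel.forward builds and later splits: one image-timestep token, one
# text-timestep token, (optionally) one data-type token, num_text_tokens text tokens,
# one CLIP image token, and num_patches VAE patch tokens, concatenated along dim=1.
# All sizes below are made-up example values; the data-type token case is omitted.
import torch

batch, dim = 2, 1536
num_text_tokens, num_patches = 77, 1024
tokens = torch.cat(
    [
        torch.zeros(batch, 1, dim),                # timestep_img token
        torch.zeros(batch, 1, dim),                # timestep_text token
        torch.zeros(batch, num_text_tokens, dim),  # text tokens
        torch.zeros(batch, 1, dim),                # CLIP image token
        torch.zeros(batch, num_patches, dim),      # VAE patch tokens
    ],
    dim=1,
)
# The same sizes recover the individual parts from the transformer output:
t_img, t_text, text_out, clip_out, vae_out = tokens.split(
    (1, 1, num_text_tokens, 1, num_patches), dim=1
)
assert vae_out.shape == (batch, num_patches, dim)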
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/get_modified_files.py
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
|
17 |
-
# python ./utils/get_modified_files.py utils src tests examples
|
18 |
-
#
|
19 |
-
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
|
20 |
-
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
|
21 |
-
|
22 |
-
import re
|
23 |
-
import subprocess
|
24 |
-
import sys
|
25 |
-
|
26 |
-
|
27 |
-
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
|
28 |
-
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
|
29 |
-
|
30 |
-
joined_dirs = "|".join(sys.argv[1:])
|
31 |
-
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
|
32 |
-
|
33 |
-
relevant_modified_files = [x for x in modified_files if regex.match(x)]
|
34 |
-
print(" ".join(relevant_modified_files), end="")
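# Illustrative sketch (not part of the original script): the regex built above only
# keeps .py files whose path starts with one of the requested top-level directories.
# The directory list and file names below are made-up examples.
import re

joined_dirs = "|".join(["utils", "src", "tests", "examples"])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

candidates = ["src/diffusers/models/attention.py", "docs/index.md", "tests/test_models.py"]
print([f for f in candidates if regex.match(f)])
# -> ['src/diffusers/models/attention.py', 'tests/test_models.py']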
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py'
|
2 |
-
model = dict(
|
3 |
-
backbone=dict(
|
4 |
-
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
|
5 |
-
stage_with_dcn=(False, True, True, True)))
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
|
2 |
-
model = dict(
|
3 |
-
roi_head=dict(
|
4 |
-
bbox_roi_extractor=dict(
|
5 |
-
type='SingleRoIExtractor',
|
6 |
-
roi_layer=dict(
|
7 |
-
_delete_=True,
|
8 |
-
type='ModulatedDeformRoIPoolPack',
|
9 |
-
output_size=7,
|
10 |
-
output_channels=256),
|
11 |
-
out_channels=256,
|
12 |
-
featmap_strides=[4, 8, 16, 32])))
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py'
|
2 |
-
# learning policy
|
3 |
-
lr_config = dict(step=[16, 22])
|
4 |
-
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolo_head.py
DELETED
@@ -1,577 +0,0 @@
|
|
1 |
-
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
|
2 |
-
|
3 |
-
import warnings
|
4 |
-
|
5 |
-
import torch
|
6 |
-
import torch.nn as nn
|
7 |
-
import torch.nn.functional as F
|
8 |
-
from mmcv.cnn import ConvModule, normal_init
|
9 |
-
from mmcv.runner import force_fp32
|
10 |
-
|
11 |
-
from mmdet.core import (build_anchor_generator, build_assigner,
|
12 |
-
build_bbox_coder, build_sampler, images_to_levels,
|
13 |
-
multi_apply, multiclass_nms)
|
14 |
-
from ..builder import HEADS, build_loss
|
15 |
-
from .base_dense_head import BaseDenseHead
|
16 |
-
from .dense_test_mixins import BBoxTestMixin
|
17 |
-
|
18 |
-
|
19 |
-
@HEADS.register_module()
|
20 |
-
class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
|
21 |
-
"""YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767.
|
22 |
-
|
23 |
-
Args:
|
24 |
-
num_classes (int): The number of object classes (w/o background)
|
25 |
-
in_channels (List[int]): Number of input channels per scale.
|
26 |
-
out_channels (List[int]): The number of output channels per scale
|
27 |
-
before the final 1x1 layer. Default: (1024, 512, 256).
|
28 |
-
anchor_generator (dict): Config dict for anchor generator
|
29 |
-
bbox_coder (dict): Config of bounding box coder.
|
30 |
-
featmap_strides (List[int]): The stride of each scale.
|
31 |
-
Should be in descending order. Default: (32, 16, 8).
|
32 |
-
one_hot_smoother (float): Set a non-zero value to enable label smoothing.
|
33 |
-
Default: 0.
|
34 |
-
conv_cfg (dict): Config dict for convolution layer. Default: None.
|
35 |
-
norm_cfg (dict): Dictionary to construct and config norm layer.
|
36 |
-
Default: dict(type='BN', requires_grad=True)
|
37 |
-
act_cfg (dict): Config dict for activation layer.
|
38 |
-
Default: dict(type='LeakyReLU', negative_slope=0.1).
|
39 |
-
loss_cls (dict): Config of classification loss.
|
40 |
-
loss_conf (dict): Config of confidence loss.
|
41 |
-
loss_xy (dict): Config of xy coordinate loss.
|
42 |
-
loss_wh (dict): Config of wh coordinate loss.
|
43 |
-
train_cfg (dict): Training config of YOLOV3 head. Default: None.
|
44 |
-
test_cfg (dict): Testing config of YOLOV3 head. Default: None.
|
45 |
-
"""
|
46 |
-
|
47 |
-
def __init__(self,
|
48 |
-
num_classes,
|
49 |
-
in_channels,
|
50 |
-
out_channels=(1024, 512, 256),
|
51 |
-
anchor_generator=dict(
|
52 |
-
type='YOLOAnchorGenerator',
|
53 |
-
base_sizes=[[(116, 90), (156, 198), (373, 326)],
|
54 |
-
[(30, 61), (62, 45), (59, 119)],
|
55 |
-
[(10, 13), (16, 30), (33, 23)]],
|
56 |
-
strides=[32, 16, 8]),
|
57 |
-
bbox_coder=dict(type='YOLOBBoxCoder'),
|
58 |
-
featmap_strides=[32, 16, 8],
|
59 |
-
one_hot_smoother=0.,
|
60 |
-
conv_cfg=None,
|
61 |
-
norm_cfg=dict(type='BN', requires_grad=True),
|
62 |
-
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
|
63 |
-
loss_cls=dict(
|
64 |
-
type='CrossEntropyLoss',
|
65 |
-
use_sigmoid=True,
|
66 |
-
loss_weight=1.0),
|
67 |
-
loss_conf=dict(
|
68 |
-
type='CrossEntropyLoss',
|
69 |
-
use_sigmoid=True,
|
70 |
-
loss_weight=1.0),
|
71 |
-
loss_xy=dict(
|
72 |
-
type='CrossEntropyLoss',
|
73 |
-
use_sigmoid=True,
|
74 |
-
loss_weight=1.0),
|
75 |
-
loss_wh=dict(type='MSELoss', loss_weight=1.0),
|
76 |
-
train_cfg=None,
|
77 |
-
test_cfg=None):
|
78 |
-
super(YOLOV3Head, self).__init__()
|
79 |
-
# Check params
|
80 |
-
assert (len(in_channels) == len(out_channels) == len(featmap_strides))
|
81 |
-
|
82 |
-
self.num_classes = num_classes
|
83 |
-
self.in_channels = in_channels
|
84 |
-
self.out_channels = out_channels
|
85 |
-
self.featmap_strides = featmap_strides
|
86 |
-
self.train_cfg = train_cfg
|
87 |
-
self.test_cfg = test_cfg
|
88 |
-
if self.train_cfg:
|
89 |
-
self.assigner = build_assigner(self.train_cfg.assigner)
|
90 |
-
if hasattr(self.train_cfg, 'sampler'):
|
91 |
-
sampler_cfg = self.train_cfg.sampler
|
92 |
-
else:
|
93 |
-
sampler_cfg = dict(type='PseudoSampler')
|
94 |
-
self.sampler = build_sampler(sampler_cfg, context=self)
|
95 |
-
|
96 |
-
self.one_hot_smoother = one_hot_smoother
|
97 |
-
|
98 |
-
self.conv_cfg = conv_cfg
|
99 |
-
self.norm_cfg = norm_cfg
|
100 |
-
self.act_cfg = act_cfg
|
101 |
-
|
102 |
-
self.bbox_coder = build_bbox_coder(bbox_coder)
|
103 |
-
self.anchor_generator = build_anchor_generator(anchor_generator)
|
104 |
-
|
105 |
-
self.loss_cls = build_loss(loss_cls)
|
106 |
-
self.loss_conf = build_loss(loss_conf)
|
107 |
-
self.loss_xy = build_loss(loss_xy)
|
108 |
-
self.loss_wh = build_loss(loss_wh)
|
109 |
-
# usually the numbers of anchors for each level are the same
|
110 |
-
# except SSD detectors
|
111 |
-
self.num_anchors = self.anchor_generator.num_base_anchors[0]
|
112 |
-
assert len(
|
113 |
-
self.anchor_generator.num_base_anchors) == len(featmap_strides)
|
114 |
-
self._init_layers()
|
115 |
-
|
116 |
-
@property
|
117 |
-
def num_levels(self):
|
118 |
-
return len(self.featmap_strides)
|
119 |
-
|
120 |
-
@property
|
121 |
-
def num_attrib(self):
|
122 |
-
"""int: number of attributes in pred_map, bboxes (4) +
|
123 |
-
objectness (1) + num_classes"""
|
124 |
-
|
125 |
-
return 5 + self.num_classes
|
126 |
-
|
127 |
-
def _init_layers(self):
|
128 |
-
self.convs_bridge = nn.ModuleList()
|
129 |
-
self.convs_pred = nn.ModuleList()
|
130 |
-
for i in range(self.num_levels):
|
131 |
-
conv_bridge = ConvModule(
|
132 |
-
self.in_channels[i],
|
133 |
-
self.out_channels[i],
|
134 |
-
3,
|
135 |
-
padding=1,
|
136 |
-
conv_cfg=self.conv_cfg,
|
137 |
-
norm_cfg=self.norm_cfg,
|
138 |
-
act_cfg=self.act_cfg)
|
139 |
-
conv_pred = nn.Conv2d(self.out_channels[i],
|
140 |
-
self.num_anchors * self.num_attrib, 1)
|
141 |
-
|
142 |
-
self.convs_bridge.append(conv_bridge)
|
143 |
-
self.convs_pred.append(conv_pred)
|
144 |
-
|
145 |
-
def init_weights(self):
|
146 |
-
"""Initialize weights of the head."""
|
147 |
-
for m in self.convs_pred:
|
148 |
-
normal_init(m, std=0.01)
|
149 |
-
|
150 |
-
def forward(self, feats):
|
151 |
-
"""Forward features from the upstream network.
|
152 |
-
|
153 |
-
Args:
|
154 |
-
feats (tuple[Tensor]): Features from the upstream network, each is
|
155 |
-
a 4D-tensor.
|
156 |
-
|
157 |
-
Returns:
|
158 |
-
tuple[Tensor]: A tuple of multi-level predication map, each is a
|
159 |
-
4D-tensor of shape (batch_size, 5+num_classes, height, width).
|
160 |
-
"""
|
161 |
-
|
162 |
-
assert len(feats) == self.num_levels
|
163 |
-
pred_maps = []
|
164 |
-
for i in range(self.num_levels):
|
165 |
-
x = feats[i]
|
166 |
-
x = self.convs_bridge[i](x)
|
167 |
-
pred_map = self.convs_pred[i](x)
|
168 |
-
pred_maps.append(pred_map)
|
169 |
-
|
170 |
-
return tuple(pred_maps),
|
171 |
-
|
172 |
-
@force_fp32(apply_to=('pred_maps', ))
|
173 |
-
def get_bboxes(self,
|
174 |
-
pred_maps,
|
175 |
-
img_metas,
|
176 |
-
cfg=None,
|
177 |
-
rescale=False,
|
178 |
-
with_nms=True):
|
179 |
-
"""Transform network output for a batch into bbox predictions.
|
180 |
-
|
181 |
-
Args:
|
182 |
-
pred_maps (list[Tensor]): Raw predictions for a batch of images.
|
183 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
184 |
-
image size, scaling factor, etc.
|
185 |
-
cfg (mmcv.Config | None): Test / postprocessing configuration,
|
186 |
-
if None, test_cfg would be used. Default: None.
|
187 |
-
rescale (bool): If True, return boxes in original image space.
|
188 |
-
Default: False.
|
189 |
-
with_nms (bool): If True, do nms before return boxes.
|
190 |
-
Default: True.
|
191 |
-
|
192 |
-
Returns:
|
193 |
-
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
|
194 |
-
The first item is an (n, 5) tensor, where 5 represent
|
195 |
-
(tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
|
196 |
-
The shape of the second tensor in the tuple is (n,), and
|
197 |
-
each element represents the class label of the corresponding
|
198 |
-
box.
|
199 |
-
"""
|
200 |
-
num_levels = len(pred_maps)
|
201 |
-
pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)]
|
202 |
-
scale_factors = [
|
203 |
-
img_metas[i]['scale_factor']
|
204 |
-
for i in range(pred_maps_list[0].shape[0])
|
205 |
-
]
|
206 |
-
result_list = self._get_bboxes(pred_maps_list, scale_factors, cfg,
|
207 |
-
rescale, with_nms)
|
208 |
-
return result_list
|
209 |
-
|
210 |
-
def _get_bboxes(self,
|
211 |
-
pred_maps_list,
|
212 |
-
scale_factors,
|
213 |
-
cfg,
|
214 |
-
rescale=False,
|
215 |
-
with_nms=True):
|
216 |
-
"""Transform outputs for a single batch item into bbox predictions.
|
217 |
-
|
218 |
-
Args:
|
219 |
-
pred_maps_list (list[Tensor]): Prediction maps for different scales
|
220 |
-
of each single image in the batch.
|
221 |
-
scale_factors (list(ndarray)): Scale factor of the image arrange as
|
222 |
-
(w_scale, h_scale, w_scale, h_scale).
|
223 |
-
cfg (mmcv.Config | None): Test / postprocessing configuration,
|
224 |
-
if None, test_cfg would be used.
|
225 |
-
rescale (bool): If True, return boxes in original image space.
|
226 |
-
Default: False.
|
227 |
-
with_nms (bool): If True, do nms before return boxes.
|
228 |
-
Default: True.
|
229 |
-
|
230 |
-
Returns:
|
231 |
-
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
|
232 |
-
The first item is an (n, 5) tensor, where 5 represent
|
233 |
-
(tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
|
234 |
-
The shape of the second tensor in the tuple is (n,), and
|
235 |
-
each element represents the class label of the corresponding
|
236 |
-
box.
|
237 |
-
"""
|
238 |
-
cfg = self.test_cfg if cfg is None else cfg
|
239 |
-
assert len(pred_maps_list) == self.num_levels
|
240 |
-
|
241 |
-
device = pred_maps_list[0].device
|
242 |
-
batch_size = pred_maps_list[0].shape[0]
|
243 |
-
|
244 |
-
featmap_sizes = [
|
245 |
-
pred_maps_list[i].shape[-2:] for i in range(self.num_levels)
|
246 |
-
]
|
247 |
-
multi_lvl_anchors = self.anchor_generator.grid_anchors(
|
248 |
-
featmap_sizes, device)
|
249 |
-
# convert to tensor to keep tracing
|
250 |
-
nms_pre_tensor = torch.tensor(
|
251 |
-
cfg.get('nms_pre', -1), device=device, dtype=torch.long)
|
252 |
-
|
253 |
-
multi_lvl_bboxes = []
|
254 |
-
multi_lvl_cls_scores = []
|
255 |
-
multi_lvl_conf_scores = []
|
256 |
-
for i in range(self.num_levels):
|
257 |
-
# get some key info for current scale
|
258 |
-
pred_map = pred_maps_list[i]
|
259 |
-
stride = self.featmap_strides[i]
|
260 |
-
# (b,h, w, num_anchors*num_attrib) ->
|
261 |
-
# (b,h*w*num_anchors, num_attrib)
|
262 |
-
pred_map = pred_map.permute(0, 2, 3,
|
263 |
-
1).reshape(batch_size, -1,
|
264 |
-
self.num_attrib)
|
265 |
-
# Inplace operation like
|
266 |
-
# ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])```
|
267 |
-
# would create constant tensor when exporting to onnx
|
268 |
-
pred_map_conf = torch.sigmoid(pred_map[..., :2])
|
269 |
-
pred_map_rest = pred_map[..., 2:]
|
270 |
-
pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1)
|
271 |
-
pred_map_boxes = pred_map[..., :4]
|
272 |
-
multi_lvl_anchor = multi_lvl_anchors[i]
|
273 |
-
multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes)
|
274 |
-
bbox_pred = self.bbox_coder.decode(multi_lvl_anchor,
|
275 |
-
pred_map_boxes, stride)
|
276 |
-
# conf and cls
|
277 |
-
conf_pred = torch.sigmoid(pred_map[..., 4])
|
278 |
-
cls_pred = torch.sigmoid(pred_map[..., 5:]).view(
|
279 |
-
batch_size, -1, self.num_classes) # Cls pred one-hot.
|
280 |
-
|
281 |
-
# Get top-k prediction
|
282 |
-
# Always keep topk op for dynamic input in onnx
|
283 |
-
if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
|
284 |
-
or conf_pred.shape[1] > nms_pre_tensor):
|
285 |
-
from torch import _shape_as_tensor
|
286 |
-
# keep shape as tensor and get k
|
287 |
-
num_anchor = _shape_as_tensor(conf_pred)[1].to(device)
|
288 |
-
nms_pre = torch.where(nms_pre_tensor < num_anchor,
|
289 |
-
nms_pre_tensor, num_anchor)
|
290 |
-
_, topk_inds = conf_pred.topk(nms_pre)
|
291 |
-
batch_inds = torch.arange(batch_size).view(
|
292 |
-
-1, 1).expand_as(topk_inds).long()
|
293 |
-
bbox_pred = bbox_pred[batch_inds, topk_inds, :]
|
294 |
-
cls_pred = cls_pred[batch_inds, topk_inds, :]
|
295 |
-
conf_pred = conf_pred[batch_inds, topk_inds]
|
296 |
-
|
297 |
-
# Save the result of current scale
|
298 |
-
multi_lvl_bboxes.append(bbox_pred)
|
299 |
-
multi_lvl_cls_scores.append(cls_pred)
|
300 |
-
multi_lvl_conf_scores.append(conf_pred)
|
301 |
-
|
302 |
-
# Merge the results of different scales together
|
303 |
-
batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1)
|
304 |
-
batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1)
|
305 |
-
batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1)
|
306 |
-
|
307 |
-
# Set max number of box to be feed into nms in deployment
|
308 |
-
deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
|
309 |
-
if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
|
310 |
-
_, topk_inds = batch_mlvl_conf_scores.topk(deploy_nms_pre)
|
311 |
-
batch_inds = torch.arange(batch_size).view(
|
312 |
-
-1, 1).expand_as(topk_inds).long()
|
313 |
-
batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
|
314 |
-
batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
|
315 |
-
batch_mlvl_conf_scores = batch_mlvl_conf_scores[batch_inds,
|
316 |
-
topk_inds]
|
317 |
-
|
318 |
-
if with_nms and (batch_mlvl_conf_scores.size(0) == 0):
|
319 |
-
return torch.zeros((0, 5)), torch.zeros((0, ))
|
320 |
-
|
321 |
-
if rescale:
|
322 |
-
batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
|
323 |
-
scale_factors).unsqueeze(1)
|
324 |
-
|
325 |
-
# In mmdet 2.x, the class_id for background is num_classes.
|
326 |
-
# i.e., the last column.
|
327 |
-
padding = batch_mlvl_scores.new_zeros(batch_size,
|
328 |
-
batch_mlvl_scores.shape[1], 1)
|
329 |
-
batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
|
330 |
-
|
331 |
-
# Support exporting to onnx without nms
|
332 |
-
if with_nms and cfg.get('nms', None) is not None:
|
333 |
-
det_results = []
|
334 |
-
for (mlvl_bboxes, mlvl_scores,
|
335 |
-
mlvl_conf_scores) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
|
336 |
-
batch_mlvl_conf_scores):
|
337 |
-
# Filtering out all predictions with conf < conf_thr
|
338 |
-
conf_thr = cfg.get('conf_thr', -1)
|
339 |
-
if conf_thr > 0 and (not torch.onnx.is_in_onnx_export()):
|
340 |
-
# TensorRT not support NonZero
|
341 |
-
# add as_tuple=False for compatibility in Pytorch 1.6
|
342 |
-
# flatten would create a Reshape op with constant values,
|
343 |
-
# and raise RuntimeError when doing inference in ONNX
|
344 |
-
# Runtime with a different input image (#4221).
|
345 |
-
conf_inds = mlvl_conf_scores.ge(conf_thr).nonzero(
|
346 |
-
as_tuple=False).squeeze(1)
|
347 |
-
mlvl_bboxes = mlvl_bboxes[conf_inds, :]
|
348 |
-
mlvl_scores = mlvl_scores[conf_inds, :]
|
349 |
-
mlvl_conf_scores = mlvl_conf_scores[conf_inds]
|
350 |
-
|
351 |
-
det_bboxes, det_labels = multiclass_nms(
|
352 |
-
mlvl_bboxes,
|
353 |
-
mlvl_scores,
|
354 |
-
cfg.score_thr,
|
355 |
-
cfg.nms,
|
356 |
-
cfg.max_per_img,
|
357 |
-
score_factors=mlvl_conf_scores)
|
358 |
-
det_results.append(tuple([det_bboxes, det_labels]))
|
359 |
-
|
360 |
-
else:
|
361 |
-
det_results = [
|
362 |
-
tuple(mlvl_bs)
|
363 |
-
for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
|
364 |
-
batch_mlvl_conf_scores)
|
365 |
-
]
|
366 |
-
return det_results
|
367 |
-
|
368 |
-
@force_fp32(apply_to=('pred_maps', ))
|
369 |
-
def loss(self,
|
370 |
-
pred_maps,
|
371 |
-
gt_bboxes,
|
372 |
-
gt_labels,
|
373 |
-
img_metas,
|
374 |
-
gt_bboxes_ignore=None):
|
375 |
-
"""Compute loss of the head.
|
376 |
-
|
377 |
-
Args:
|
378 |
-
pred_maps (list[Tensor]): Prediction map for each scale level,
|
379 |
-
shape (N, num_anchors * num_attrib, H, W)
|
380 |
-
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
|
381 |
-
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
|
382 |
-
gt_labels (list[Tensor]): class indices corresponding to each box
|
383 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
384 |
-
image size, scaling factor, etc.
|
385 |
-
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
|
386 |
-
boxes can be ignored when computing the loss.
|
387 |
-
|
388 |
-
Returns:
|
389 |
-
dict[str, Tensor]: A dictionary of loss components.
|
390 |
-
"""
|
391 |
-
num_imgs = len(img_metas)
|
392 |
-
device = pred_maps[0][0].device
|
393 |
-
|
394 |
-
featmap_sizes = [
|
395 |
-
pred_maps[i].shape[-2:] for i in range(self.num_levels)
|
396 |
-
]
|
397 |
-
multi_level_anchors = self.anchor_generator.grid_anchors(
|
398 |
-
featmap_sizes, device)
|
399 |
-
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
|
400 |
-
|
401 |
-
responsible_flag_list = []
|
402 |
-
for img_id in range(len(img_metas)):
|
403 |
-
responsible_flag_list.append(
|
404 |
-
self.anchor_generator.responsible_flags(
|
405 |
-
featmap_sizes, gt_bboxes[img_id], device))
|
406 |
-
|
407 |
-
target_maps_list, neg_maps_list = self.get_targets(
|
408 |
-
anchor_list, responsible_flag_list, gt_bboxes, gt_labels)
|
409 |
-
|
410 |
-
losses_cls, losses_conf, losses_xy, losses_wh = multi_apply(
|
411 |
-
self.loss_single, pred_maps, target_maps_list, neg_maps_list)
|
412 |
-
|
413 |
-
return dict(
|
414 |
-
loss_cls=losses_cls,
|
415 |
-
loss_conf=losses_conf,
|
416 |
-
loss_xy=losses_xy,
|
417 |
-
loss_wh=losses_wh)
|
418 |
-
|
419 |
-
def loss_single(self, pred_map, target_map, neg_map):
|
420 |
-
"""Compute loss of a single image from a batch.
|
421 |
-
|
422 |
-
Args:
|
423 |
-
pred_map (Tensor): Raw predictions for a single level.
|
424 |
-
target_map (Tensor): The Ground-Truth target for a single level.
|
425 |
-
neg_map (Tensor): The negative masks for a single level.
|
426 |
-
|
427 |
-
Returns:
|
428 |
-
tuple:
|
429 |
-
loss_cls (Tensor): Classification loss.
|
430 |
-
loss_conf (Tensor): Confidence loss.
|
431 |
-
loss_xy (Tensor): Regression loss of x, y coordinate.
|
432 |
-
loss_wh (Tensor): Regression loss of w, h coordinate.
|
433 |
-
"""
|
434 |
-
|
435 |
-
num_imgs = len(pred_map)
|
436 |
-
pred_map = pred_map.permute(0, 2, 3,
|
437 |
-
1).reshape(num_imgs, -1, self.num_attrib)
|
438 |
-
neg_mask = neg_map.float()
|
439 |
-
pos_mask = target_map[..., 4]
|
440 |
-
pos_and_neg_mask = neg_mask + pos_mask
|
441 |
-
pos_mask = pos_mask.unsqueeze(dim=-1)
|
442 |
-
if torch.max(pos_and_neg_mask) > 1.:
|
443 |
-
warnings.warn('There is overlap between pos and neg sample.')
|
444 |
-
pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.)
|
445 |
-
|
446 |
-
pred_xy = pred_map[..., :2]
|
447 |
-
pred_wh = pred_map[..., 2:4]
|
448 |
-
pred_conf = pred_map[..., 4]
|
449 |
-
pred_label = pred_map[..., 5:]
|
450 |
-
|
451 |
-
target_xy = target_map[..., :2]
|
452 |
-
target_wh = target_map[..., 2:4]
|
453 |
-
target_conf = target_map[..., 4]
|
454 |
-
target_label = target_map[..., 5:]
|
455 |
-
|
456 |
-
loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask)
|
457 |
-
loss_conf = self.loss_conf(
|
458 |
-
pred_conf, target_conf, weight=pos_and_neg_mask)
|
459 |
-
loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask)
|
460 |
-
loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask)
|
461 |
-
|
462 |
-
return loss_cls, loss_conf, loss_xy, loss_wh
|
463 |
-
|
464 |
-
def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
|
465 |
-
gt_labels_list):
|
466 |
-
"""Compute target maps for anchors in multiple images.
|
467 |
-
|
468 |
-
Args:
|
469 |
-
anchor_list (list[list[Tensor]]): Multi level anchors of each
|
470 |
-
image. The outer list indicates images, and the inner list
|
471 |
-
corresponds to feature levels of the image. Each element of
|
472 |
-
the inner list is a tensor of shape (num_total_anchors, 4).
|
473 |
-
responsible_flag_list (list[list[Tensor]]): Multi level responsible
|
474 |
-
flags of each image. Each element is a tensor of shape
|
475 |
-
(num_total_anchors, )
|
476 |
-
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
|
477 |
-
gt_labels_list (list[Tensor]): Ground truth labels of each box.
|
478 |
-
|
479 |
-
Returns:
|
480 |
-
tuple: Usually returns a tuple containing learning targets.
|
481 |
-
- target_map_list (list[Tensor]): Target map of each level.
|
482 |
-
- neg_map_list (list[Tensor]): Negative map of each level.
|
483 |
-
"""
|
484 |
-
num_imgs = len(anchor_list)
|
485 |
-
|
486 |
-
# anchor number of multi levels
|
487 |
-
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
|
488 |
-
|
489 |
-
results = multi_apply(self._get_targets_single, anchor_list,
|
490 |
-
responsible_flag_list, gt_bboxes_list,
|
491 |
-
gt_labels_list)
|
492 |
-
|
493 |
-
all_target_maps, all_neg_maps = results
|
494 |
-
assert num_imgs == len(all_target_maps) == len(all_neg_maps)
|
495 |
-
target_maps_list = images_to_levels(all_target_maps, num_level_anchors)
|
496 |
-
neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors)
|
497 |
-
|
498 |
-
return target_maps_list, neg_maps_list
|
499 |
-
|
500 |
-
def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,
|
501 |
-
gt_labels):
|
502 |
-
"""Generate matching bounding box prior and converted GT.
|
503 |
-
|
504 |
-
Args:
|
505 |
-
anchors (list[Tensor]): Multi-level anchors of the image.
|
506 |
-
responsible_flags (list[Tensor]): Multi-level responsible flags of
|
507 |
-
anchors
|
508 |
-
gt_bboxes (Tensor): Ground truth bboxes of single image.
|
509 |
-
gt_labels (Tensor): Ground truth labels of single image.
|
510 |
-
|
511 |
-
Returns:
|
512 |
-
tuple:
|
513 |
-
target_map (Tensor): Predication target map of each
|
514 |
-
scale level, shape (num_total_anchors,
|
515 |
-
5+num_classes)
|
516 |
-
neg_map (Tensor): Negative map of each scale level,
|
517 |
-
shape (num_total_anchors,)
|
518 |
-
"""
|
519 |
-
|
520 |
-
anchor_strides = []
|
521 |
-
for i in range(len(anchors)):
|
522 |
-
anchor_strides.append(
|
523 |
-
torch.tensor(self.featmap_strides[i],
|
524 |
-
device=gt_bboxes.device).repeat(len(anchors[i])))
|
525 |
-
concat_anchors = torch.cat(anchors)
|
526 |
-
concat_responsible_flags = torch.cat(responsible_flags)
|
527 |
-
|
528 |
-
anchor_strides = torch.cat(anchor_strides)
|
529 |
-
assert len(anchor_strides) == len(concat_anchors) == \
|
530 |
-
len(concat_responsible_flags)
|
531 |
-
assign_result = self.assigner.assign(concat_anchors,
|
532 |
-
concat_responsible_flags,
|
533 |
-
gt_bboxes)
|
534 |
-
sampling_result = self.sampler.sample(assign_result, concat_anchors,
|
535 |
-
gt_bboxes)
|
536 |
-
|
537 |
-
target_map = concat_anchors.new_zeros(
|
538 |
-
concat_anchors.size(0), self.num_attrib)
|
539 |
-
|
540 |
-
target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode(
|
541 |
-
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes,
|
542 |
-
anchor_strides[sampling_result.pos_inds])
|
543 |
-
|
544 |
-
target_map[sampling_result.pos_inds, 4] = 1
|
545 |
-
|
546 |
-
gt_labels_one_hot = F.one_hot(
|
547 |
-
gt_labels, num_classes=self.num_classes).float()
|
548 |
-
if self.one_hot_smoother != 0: # label smooth
|
549 |
-
gt_labels_one_hot = gt_labels_one_hot * (
|
550 |
-
1 - self.one_hot_smoother
|
551 |
-
) + self.one_hot_smoother / self.num_classes
|
552 |
-
target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[
|
553 |
-
sampling_result.pos_assigned_gt_inds]
|
554 |
-
|
555 |
-
neg_map = concat_anchors.new_zeros(
|
556 |
-
concat_anchors.size(0), dtype=torch.uint8)
|
557 |
-
neg_map[sampling_result.neg_inds] = 1
|
558 |
-
|
559 |
-
return target_map, neg_map
|
560 |
-
|
561 |
-
def aug_test(self, feats, img_metas, rescale=False):
|
562 |
-
"""Test function with test time augmentation.
|
563 |
-
|
564 |
-
Args:
|
565 |
-
feats (list[Tensor]): the outer list indicates test-time
|
566 |
-
augmentations and inner Tensor should have a shape NxCxHxW,
|
567 |
-
which contains features for all images in the batch.
|
568 |
-
img_metas (list[list[dict]]): the outer list indicates test-time
|
569 |
-
augs (multiscale, flip, etc.) and the inner list indicates
|
570 |
-
images in a batch. each dict has image information.
|
571 |
-
rescale (bool, optional): Whether to rescale the results.
|
572 |
-
Defaults to False.
|
573 |
-
|
574 |
-
Returns:
|
575 |
-
list[ndarray]: bbox results of each class
|
576 |
-
"""
|
577 |
-
return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
|
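As a rough illustration of this head's interface, the sketch below builds a YOLOV3Head with its default anchor generator and runs it on dummy feature maps. It is a minimal sketch, not taken from this repository: it assumes mmdet 2.x is installed so the registry imports resolve, and the in_channels values (512, 256, 128) assume the usual Darknet-53 neck widths.

import torch
from mmdet.models.dense_heads import YOLOV3Head  # assumes mmdet 2.x

# 80 COCO classes; channel widths follow the typical YOLOv3 neck (assumption).
head = YOLOV3Head(num_classes=80, in_channels=[512, 256, 128],
                  out_channels=[1024, 512, 256])
head.init_weights()

# Dummy multi-level features for a 416x416 input (strides 32, 16, 8).
feats = (torch.randn(1, 512, 13, 13),
         torch.randn(1, 256, 26, 26),
         torch.randn(1, 128, 52, 52))
pred_maps, = head(feats)  # forward returns a one-element tuple of per-level maps
for p in pred_maps:
    # 3 anchors * (4 box + 1 objectness + 80 classes) = 255 channels per level
    print(p.shape)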
spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py
DELETED
@@ -1,6 +0,0 @@
-_base_ = [
-    '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
-]
-model = dict(
-    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py
DELETED
@@ -1,39 +0,0 @@
-_base_ = './ocrnet_hr18_512x1024_40k_cityscapes.py'
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w48',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(48, 96)),
-            stage3=dict(num_channels=(48, 96, 192)),
-            stage4=dict(num_channels=(48, 96, 192, 384)))),
-    decode_head=[
-        dict(
-            type='FCNHead',
-            in_channels=[48, 96, 192, 384],
-            channels=sum([48, 96, 192, 384]),
-            input_transform='resize_concat',
-            in_index=(0, 1, 2, 3),
-            kernel_size=1,
-            num_convs=1,
-            norm_cfg=norm_cfg,
-            concat_input=False,
-            dropout_ratio=-1,
-            num_classes=19,
-            align_corners=False,
-            loss_decode=dict(
-                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-        dict(
-            type='OCRHead',
-            in_channels=[48, 96, 192, 384],
-            channels=512,
-            ocr_channels=256,
-            input_transform='resize_concat',
-            in_index=(0, 1, 2, 3),
-            norm_cfg=norm_cfg,
-            dropout_ratio=-1,
-            num_classes=19,
-            align_corners=False,
-            loss_decode=dict(
-                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
-    ])
spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './psanet_r50-d8_512x512_80k_ade20k.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/inference.py
DELETED
@@ -1,136 +0,0 @@
-import matplotlib.pyplot as plt
-import annotator.uniformer.mmcv as mmcv
-import torch
-from annotator.uniformer.mmcv.parallel import collate, scatter
-from annotator.uniformer.mmcv.runner import load_checkpoint
-
-from annotator.uniformer.mmseg.datasets.pipelines import Compose
-from annotator.uniformer.mmseg.models import build_segmentor
-
-
-def init_segmentor(config, checkpoint=None, device='cuda:0'):
-    """Initialize a segmentor from config file.
-
-    Args:
-        config (str or :obj:`mmcv.Config`): Config file path or the config
-            object.
-        checkpoint (str, optional): Checkpoint path. If left as None, the model
-            will not load any weights.
-        device (str, optional) CPU/CUDA device option. Default 'cuda:0'.
-            Use 'cpu' for loading model on CPU.
-    Returns:
-        nn.Module: The constructed segmentor.
-    """
-    if isinstance(config, str):
-        config = mmcv.Config.fromfile(config)
-    elif not isinstance(config, mmcv.Config):
-        raise TypeError('config must be a filename or Config object, '
-                        'but got {}'.format(type(config)))
-    config.model.pretrained = None
-    config.model.train_cfg = None
-    model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
-    if checkpoint is not None:
-        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
-        model.CLASSES = checkpoint['meta']['CLASSES']
-        model.PALETTE = checkpoint['meta']['PALETTE']
-    model.cfg = config  # save the config in the model for convenience
-    model.to(device)
-    model.eval()
-    return model
-
-
-class LoadImage:
-    """A simple pipeline to load image."""
-
-    def __call__(self, results):
-        """Call function to load images into results.
-
-        Args:
-            results (dict): A result dict contains the file name
-                of the image to be read.
-
-        Returns:
-            dict: ``results`` will be returned containing loaded image.
-        """
-
-        if isinstance(results['img'], str):
-            results['filename'] = results['img']
-            results['ori_filename'] = results['img']
-        else:
-            results['filename'] = None
-            results['ori_filename'] = None
-        img = mmcv.imread(results['img'])
-        results['img'] = img
-        results['img_shape'] = img.shape
-        results['ori_shape'] = img.shape
-        return results
-
-
-def inference_segmentor(model, img):
-    """Inference image(s) with the segmentor.
-
-    Args:
-        model (nn.Module): The loaded segmentor.
-        imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
-            images.
-
-    Returns:
-        (list[Tensor]): The segmentation result.
-    """
-    cfg = model.cfg
-    device = next(model.parameters()).device  # model device
-    # build the data pipeline
-    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
-    test_pipeline = Compose(test_pipeline)
-    # prepare data
-    data = dict(img=img)
-    data = test_pipeline(data)
-    data = collate([data], samples_per_gpu=1)
-    if next(model.parameters()).is_cuda:
-        # scatter to specified GPU
-        data = scatter(data, [device])[0]
-    else:
-        data['img_metas'] = [i.data[0] for i in data['img_metas']]
-
-    # forward the model
-    with torch.no_grad():
-        result = model(return_loss=False, rescale=True, **data)
-    return result
-
-
-def show_result_pyplot(model,
-                       img,
-                       result,
-                       palette=None,
-                       fig_size=(15, 10),
-                       opacity=0.5,
-                       title='',
-                       block=True):
-    """Visualize the segmentation results on the image.
-
-    Args:
-        model (nn.Module): The loaded segmentor.
-        img (str or np.ndarray): Image filename or loaded image.
-        result (list): The segmentation result.
-        palette (list[list[int]]] | None): The palette of segmentation
-            map. If None is given, random palette will be generated.
-            Default: None
-        fig_size (tuple): Figure size of the pyplot figure.
-        opacity(float): Opacity of painted segmentation map.
-            Default 0.5.
-            Must be in (0, 1] range.
-        title (str): The title of pyplot figure.
-            Default is ''.
-        block (bool): Whether to block the pyplot figure.
-            Default is True.
-    """
-    if hasattr(model, 'module'):
-        model = model.module
-    img = model.show_result(
-        img, result, palette=palette, show=False, opacity=opacity)
-    # plt.figure(figsize=fig_size)
-    # plt.imshow(mmcv.bgr2rgb(img))
-    # plt.title(title)
-    # plt.tight_layout()
-    # plt.show(block=block)
-    return mmcv.bgr2rgb(img)
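For context, these helpers are typically chained as below. This is a minimal sketch: the config and checkpoint paths are placeholders, and it assumes the annotator.uniformer package from this repository is importable as shown.

from annotator.uniformer.mmseg.apis.inference import (
    init_segmentor, inference_segmentor, show_result_pyplot)

# Placeholder paths; any mmseg-style config/checkpoint pair would do.
model = init_segmentor('upernet_uniformer.py', 'upernet_uniformer.pth',
                       device='cuda:0')
result = inference_segmentor(model, 'demo.jpg')      # list with one H x W label map
vis = show_result_pyplot(model, 'demo.jpg', result)  # returns an RGB ndarray (plotting is commented out above)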
spaces/ArcAhmedEssam/CLIP-Interrogator-2/share_btn.py
DELETED
@@ -1,70 +0,0 @@
|
|
1 |
-
community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
|
2 |
-
<path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
|
3 |
-
<path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
|
4 |
-
</svg>"""
|
5 |
-
|
6 |
-
loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
|
7 |
-
style="color: #ffffff;
|
8 |
-
"
|
9 |
-
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
|
10 |
-
|
11 |
-
share_js = """async () => {
|
12 |
-
async function uploadFile(file){
|
13 |
-
const UPLOAD_URL = 'https://huggingface.co/uploads';
|
14 |
-
const response = await fetch(UPLOAD_URL, {
|
15 |
-
method: 'POST',
|
16 |
-
headers: {
|
17 |
-
'Content-Type': file.type,
|
18 |
-
'X-Requested-With': 'XMLHttpRequest',
|
19 |
-
},
|
20 |
-
body: file, /// <- File inherits from Blob
|
21 |
-
});
|
22 |
-
const url = await response.text();
|
23 |
-
return url;
|
24 |
-
}
|
25 |
-
async function getInputImgFile(imgEl){
|
26 |
-
const res = await fetch(imgEl.src);
|
27 |
-
const blob = await res.blob();
|
28 |
-
const imgId = Date.now() % 200;
|
29 |
-
const isPng = imgEl.src.startsWith(`data:image/png`);
|
30 |
-
if(isPng){
|
31 |
-
const fileName = `sd-perception-${{imgId}}.png`;
|
32 |
-
return new File([blob], fileName, { type: 'image/png' });
|
33 |
-
}else{
|
34 |
-
const fileName = `sd-perception-${{imgId}}.jpg`;
|
35 |
-
return new File([blob], fileName, { type: 'image/jpeg' });
|
36 |
-
}
|
37 |
-
}
|
38 |
-
const gradioEl = document.querySelector('body > gradio-app');
|
39 |
-
// const gradioEl = document.querySelector("gradio-app").shadowRoot;
|
40 |
-
const inputImgEl = gradioEl.querySelector('#input-img img');
|
41 |
-
const outputTxt = gradioEl.querySelector('#output-txt textarea').value;
|
42 |
-
let titleTxt = outputTxt;
|
43 |
-
if(titleTxt.length > 100){
|
44 |
-
titleTxt = titleTxt.slice(0, 100) + ' ...';
|
45 |
-
}
|
46 |
-
const shareBtnEl = gradioEl.querySelector('#share-btn');
|
47 |
-
const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
|
48 |
-
const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
|
49 |
-
if(!outputTxt){
|
50 |
-
return;
|
51 |
-
};
|
52 |
-
shareBtnEl.style.pointerEvents = 'none';
|
53 |
-
shareIconEl.style.display = 'none';
|
54 |
-
loadingIconEl.style.removeProperty('display');
|
55 |
-
const inputFile = await getInputImgFile(inputImgEl);
|
56 |
-
const urlInputImg = await uploadFile(inputFile);
|
57 |
-
const descriptionMd = `#### Input img:
|
58 |
-
<img src='${urlInputImg}' style='max-height: 350px;'>
|
59 |
-
#### Caption:
|
60 |
-
${outputTxt}`;
|
61 |
-
const params = new URLSearchParams({
|
62 |
-
title: titleTxt,
|
63 |
-
description: descriptionMd,
|
64 |
-
});
|
65 |
-
const paramsStr = params.toString();
|
66 |
-
window.open(`https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2/discussions/new?${paramsStr}`, '_blank');
|
67 |
-
shareBtnEl.style.removeProperty('pointer-events');
|
68 |
-
shareIconEl.style.removeProperty('display');
|
69 |
-
loadingIconEl.style.display = 'none';
|
70 |
-
}"""
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/wheel.py
DELETED
@@ -1,740 +0,0 @@
|
|
1 |
-
"""Support for installing and building the "wheel" binary package format.
|
2 |
-
"""
|
3 |
-
|
4 |
-
import collections
|
5 |
-
import compileall
|
6 |
-
import contextlib
|
7 |
-
import csv
|
8 |
-
import importlib
|
9 |
-
import logging
|
10 |
-
import os.path
|
11 |
-
import re
|
12 |
-
import shutil
|
13 |
-
import sys
|
14 |
-
import warnings
|
15 |
-
from base64 import urlsafe_b64encode
|
16 |
-
from email.message import Message
|
17 |
-
from itertools import chain, filterfalse, starmap
|
18 |
-
from typing import (
|
19 |
-
IO,
|
20 |
-
TYPE_CHECKING,
|
21 |
-
Any,
|
22 |
-
BinaryIO,
|
23 |
-
Callable,
|
24 |
-
Dict,
|
25 |
-
Generator,
|
26 |
-
Iterable,
|
27 |
-
Iterator,
|
28 |
-
List,
|
29 |
-
NewType,
|
30 |
-
Optional,
|
31 |
-
Sequence,
|
32 |
-
Set,
|
33 |
-
Tuple,
|
34 |
-
Union,
|
35 |
-
cast,
|
36 |
-
)
|
37 |
-
from zipfile import ZipFile, ZipInfo
|
38 |
-
|
39 |
-
from pip._vendor.distlib.scripts import ScriptMaker
|
40 |
-
from pip._vendor.distlib.util import get_export_entry
|
41 |
-
from pip._vendor.packaging.utils import canonicalize_name
|
42 |
-
|
43 |
-
from pip._internal.exceptions import InstallationError
|
44 |
-
from pip._internal.locations import get_major_minor_version
|
45 |
-
from pip._internal.metadata import (
|
46 |
-
BaseDistribution,
|
47 |
-
FilesystemWheel,
|
48 |
-
get_wheel_distribution,
|
49 |
-
)
|
50 |
-
from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl
|
51 |
-
from pip._internal.models.scheme import SCHEME_KEYS, Scheme
|
52 |
-
from pip._internal.utils.filesystem import adjacent_tmp_file, replace
|
53 |
-
from pip._internal.utils.misc import captured_stdout, ensure_dir, hash_file, partition
|
54 |
-
from pip._internal.utils.unpacking import (
|
55 |
-
current_umask,
|
56 |
-
is_within_directory,
|
57 |
-
set_extracted_file_to_default_mode_plus_executable,
|
58 |
-
zip_item_is_executable,
|
59 |
-
)
|
60 |
-
from pip._internal.utils.wheel import parse_wheel
|
61 |
-
|
62 |
-
if TYPE_CHECKING:
|
63 |
-
from typing import Protocol
|
64 |
-
|
65 |
-
class File(Protocol):
|
66 |
-
src_record_path: "RecordPath"
|
67 |
-
dest_path: str
|
68 |
-
changed: bool
|
69 |
-
|
70 |
-
def save(self) -> None:
|
71 |
-
pass
|
72 |
-
|
73 |
-
|
74 |
-
logger = logging.getLogger(__name__)
|
75 |
-
|
76 |
-
RecordPath = NewType("RecordPath", str)
|
77 |
-
InstalledCSVRow = Tuple[RecordPath, str, Union[int, str]]
|
78 |
-
|
79 |
-
|
80 |
-
def rehash(path: str, blocksize: int = 1 << 20) -> Tuple[str, str]:
|
81 |
-
"""Return (encoded_digest, length) for path using hashlib.sha256()"""
|
82 |
-
h, length = hash_file(path, blocksize)
|
83 |
-
digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=")
|
84 |
-
return (digest, str(length))
|
85 |
-
|
86 |
-
|
87 |
-
def csv_io_kwargs(mode: str) -> Dict[str, Any]:
|
88 |
-
"""Return keyword arguments to properly open a CSV file
|
89 |
-
in the given mode.
|
90 |
-
"""
|
91 |
-
return {"mode": mode, "newline": "", "encoding": "utf-8"}
|
92 |
-
|
93 |
-
|
94 |
-
def fix_script(path: str) -> bool:
|
95 |
-
"""Replace #!python with #!/path/to/python
|
96 |
-
Return True if file was changed.
|
97 |
-
"""
|
98 |
-
# XXX RECORD hashes will need to be updated
|
99 |
-
assert os.path.isfile(path)
|
100 |
-
|
101 |
-
with open(path, "rb") as script:
|
102 |
-
firstline = script.readline()
|
103 |
-
if not firstline.startswith(b"#!python"):
|
104 |
-
return False
|
105 |
-
exename = sys.executable.encode(sys.getfilesystemencoding())
|
106 |
-
firstline = b"#!" + exename + os.linesep.encode("ascii")
|
107 |
-
rest = script.read()
|
108 |
-
with open(path, "wb") as script:
|
109 |
-
script.write(firstline)
|
110 |
-
script.write(rest)
|
111 |
-
return True
|
112 |
-
|
113 |
-
|
114 |
-
def wheel_root_is_purelib(metadata: Message) -> bool:
|
115 |
-
return metadata.get("Root-Is-Purelib", "").lower() == "true"
|
116 |
-
|
117 |
-
|
118 |
-
def get_entrypoints(dist: BaseDistribution) -> Tuple[Dict[str, str], Dict[str, str]]:
|
119 |
-
console_scripts = {}
|
120 |
-
gui_scripts = {}
|
121 |
-
for entry_point in dist.iter_entry_points():
|
122 |
-
if entry_point.group == "console_scripts":
|
123 |
-
console_scripts[entry_point.name] = entry_point.value
|
124 |
-
elif entry_point.group == "gui_scripts":
|
125 |
-
gui_scripts[entry_point.name] = entry_point.value
|
126 |
-
return console_scripts, gui_scripts
|
127 |
-
|
128 |
-
|
129 |
-
def message_about_scripts_not_on_PATH(scripts: Sequence[str]) -> Optional[str]:
|
130 |
-
"""Determine if any scripts are not on PATH and format a warning.
|
131 |
-
Returns a warning message if one or more scripts are not on PATH,
|
132 |
-
otherwise None.
|
133 |
-
"""
|
134 |
-
if not scripts:
|
135 |
-
return None
|
136 |
-
|
137 |
-
# Group scripts by the path they were installed in
|
138 |
-
grouped_by_dir: Dict[str, Set[str]] = collections.defaultdict(set)
|
139 |
-
for destfile in scripts:
|
140 |
-
parent_dir = os.path.dirname(destfile)
|
141 |
-
script_name = os.path.basename(destfile)
|
142 |
-
grouped_by_dir[parent_dir].add(script_name)
|
143 |
-
|
144 |
-
# We don't want to warn for directories that are on PATH.
|
145 |
-
not_warn_dirs = [
|
146 |
-
os.path.normcase(os.path.normpath(i)).rstrip(os.sep)
|
147 |
-
for i in os.environ.get("PATH", "").split(os.pathsep)
|
148 |
-
]
|
149 |
-
# If an executable sits with sys.executable, we don't warn for it.
|
150 |
-
# This covers the case of venv invocations without activating the venv.
|
151 |
-
not_warn_dirs.append(
|
152 |
-
os.path.normcase(os.path.normpath(os.path.dirname(sys.executable)))
|
153 |
-
)
|
154 |
-
warn_for: Dict[str, Set[str]] = {
|
155 |
-
parent_dir: scripts
|
156 |
-
for parent_dir, scripts in grouped_by_dir.items()
|
157 |
-
if os.path.normcase(os.path.normpath(parent_dir)) not in not_warn_dirs
|
158 |
-
}
|
159 |
-
if not warn_for:
|
160 |
-
return None
|
161 |
-
|
162 |
-
# Format a message
|
163 |
-
msg_lines = []
|
164 |
-
for parent_dir, dir_scripts in warn_for.items():
|
165 |
-
sorted_scripts: List[str] = sorted(dir_scripts)
|
166 |
-
if len(sorted_scripts) == 1:
|
167 |
-
start_text = "script {} is".format(sorted_scripts[0])
|
168 |
-
else:
|
169 |
-
start_text = "scripts {} are".format(
|
170 |
-
", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]
|
171 |
-
)
|
172 |
-
|
173 |
-
msg_lines.append(
|
174 |
-
"The {} installed in '{}' which is not on PATH.".format(
|
175 |
-
start_text, parent_dir
|
176 |
-
)
|
177 |
-
)
|
178 |
-
|
179 |
-
last_line_fmt = (
|
180 |
-
"Consider adding {} to PATH or, if you prefer "
|
181 |
-
"to suppress this warning, use --no-warn-script-location."
|
182 |
-
)
|
183 |
-
if len(msg_lines) == 1:
|
184 |
-
msg_lines.append(last_line_fmt.format("this directory"))
|
185 |
-
else:
|
186 |
-
msg_lines.append(last_line_fmt.format("these directories"))
|
187 |
-
|
188 |
-
# Add a note if any directory starts with ~
|
189 |
-
warn_for_tilde = any(
|
190 |
-
i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i
|
191 |
-
)
|
192 |
-
if warn_for_tilde:
|
193 |
-
tilde_warning_msg = (
|
194 |
-
"NOTE: The current PATH contains path(s) starting with `~`, "
|
195 |
-
"which may not be expanded by all applications."
|
196 |
-
)
|
197 |
-
msg_lines.append(tilde_warning_msg)
|
198 |
-
|
199 |
-
# Returns the formatted multiline message
|
200 |
-
return "\n".join(msg_lines)
|
201 |
-
|
202 |
-
|
203 |
-
def _normalized_outrows(
|
204 |
-
outrows: Iterable[InstalledCSVRow],
|
205 |
-
) -> List[Tuple[str, str, str]]:
|
206 |
-
"""Normalize the given rows of a RECORD file.
|
207 |
-
|
208 |
-
Items in each row are converted into str. Rows are then sorted to make
|
209 |
-
the value more predictable for tests.
|
210 |
-
|
211 |
-
Each row is a 3-tuple (path, hash, size) and corresponds to a record of
|
212 |
-
a RECORD file (see PEP 376 and PEP 427 for details). For the rows
|
213 |
-
passed to this function, the size can be an integer as an int or string,
|
214 |
-
or the empty string.
|
215 |
-
"""
|
216 |
-
# Normally, there should only be one row per path, in which case the
|
217 |
-
# second and third elements don't come into play when sorting.
|
218 |
-
# However, in cases in the wild where a path might happen to occur twice,
|
219 |
-
# we don't want the sort operation to trigger an error (but still want
|
220 |
-
# determinism). Since the third element can be an int or string, we
|
221 |
-
# coerce each element to a string to avoid a TypeError in this case.
|
222 |
-
# For additional background, see--
|
223 |
-
# https://github.com/pypa/pip/issues/5868
|
224 |
-
return sorted(
|
225 |
-
(record_path, hash_, str(size)) for record_path, hash_, size in outrows
|
226 |
-
)
|
227 |
-
|
228 |
-
|
229 |
-
def _record_to_fs_path(record_path: RecordPath, lib_dir: str) -> str:
|
230 |
-
return os.path.join(lib_dir, record_path)
|
231 |
-
|
232 |
-
|
233 |
-
def _fs_to_record_path(path: str, lib_dir: str) -> RecordPath:
|
234 |
-
# On Windows, do not handle relative paths if they belong to different
|
235 |
-
# logical disks
|
236 |
-
if os.path.splitdrive(path)[0].lower() == os.path.splitdrive(lib_dir)[0].lower():
|
237 |
-
path = os.path.relpath(path, lib_dir)
|
238 |
-
|
239 |
-
path = path.replace(os.path.sep, "/")
|
240 |
-
return cast("RecordPath", path)
|
241 |
-
|
242 |
-
|
243 |
-
def get_csv_rows_for_installed(
|
244 |
-
old_csv_rows: List[List[str]],
|
245 |
-
installed: Dict[RecordPath, RecordPath],
|
246 |
-
changed: Set[RecordPath],
|
247 |
-
generated: List[str],
|
248 |
-
lib_dir: str,
|
249 |
-
) -> List[InstalledCSVRow]:
|
250 |
-
"""
|
251 |
-
:param installed: A map from archive RECORD path to installation RECORD
|
252 |
-
path.
|
253 |
-
"""
|
254 |
-
installed_rows: List[InstalledCSVRow] = []
|
255 |
-
for row in old_csv_rows:
|
256 |
-
if len(row) > 3:
|
257 |
-
logger.warning("RECORD line has more than three elements: %s", row)
|
258 |
-
old_record_path = cast("RecordPath", row[0])
|
259 |
-
new_record_path = installed.pop(old_record_path, old_record_path)
|
260 |
-
if new_record_path in changed:
|
261 |
-
digest, length = rehash(_record_to_fs_path(new_record_path, lib_dir))
|
262 |
-
else:
|
263 |
-
digest = row[1] if len(row) > 1 else ""
|
264 |
-
length = row[2] if len(row) > 2 else ""
|
265 |
-
installed_rows.append((new_record_path, digest, length))
|
266 |
-
for f in generated:
|
267 |
-
path = _fs_to_record_path(f, lib_dir)
|
268 |
-
digest, length = rehash(f)
|
269 |
-
installed_rows.append((path, digest, length))
|
270 |
-
for installed_record_path in installed.values():
|
271 |
-
installed_rows.append((installed_record_path, "", ""))
|
272 |
-
return installed_rows
|
273 |
-
|
274 |
-
|
275 |
-
def get_console_script_specs(console: Dict[str, str]) -> List[str]:
|
276 |
-
"""
|
277 |
-
Given the mapping from entrypoint name to callable, return the relevant
|
278 |
-
console script specs.
|
279 |
-
"""
|
280 |
-
# Don't mutate caller's version
|
281 |
-
console = console.copy()
|
282 |
-
|
283 |
-
scripts_to_generate = []
|
284 |
-
|
285 |
-
# Special case pip and setuptools to generate versioned wrappers
|
286 |
-
#
|
287 |
-
# The issue is that some projects (specifically, pip and setuptools) use
|
288 |
-
# code in setup.py to create "versioned" entry points - pip2.7 on Python
|
289 |
-
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
|
290 |
-
# the wheel metadata at build time, and so if the wheel is installed with
|
291 |
-
# a *different* version of Python the entry points will be wrong. The
|
292 |
-
# correct fix for this is to enhance the metadata to be able to describe
|
293 |
-
# such versioned entry points, but that won't happen till Metadata 2.0 is
|
294 |
-
# available.
|
295 |
-
# In the meantime, projects using versioned entry points will either have
|
296 |
-
# incorrect versioned entry points, or they will not be able to distribute
|
297 |
-
# "universal" wheels (i.e., they will need a wheel per Python version).
|
298 |
-
#
|
299 |
-
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
|
300 |
-
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
|
301 |
-
# override the versioned entry points in the wheel and generate the
|
302 |
-
# correct ones. This code is purely a short-term measure until Metadata 2.0
|
303 |
-
# is available.
|
304 |
-
#
|
305 |
-
# To add the level of hack in this section of code, in order to support
|
306 |
-
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
|
307 |
-
# variable which will control which version scripts get installed.
|
308 |
-
#
|
309 |
-
# ENSUREPIP_OPTIONS=altinstall
|
310 |
-
# - Only pipX.Y and easy_install-X.Y will be generated and installed
|
311 |
-
# ENSUREPIP_OPTIONS=install
|
312 |
-
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
|
313 |
-
# that this option is technically if ENSUREPIP_OPTIONS is set and is
|
314 |
-
# not altinstall
|
315 |
-
# DEFAULT
|
316 |
-
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
|
317 |
-
# and easy_install-X.Y.
|
318 |
-
pip_script = console.pop("pip", None)
|
319 |
-
if pip_script:
|
320 |
-
if "ENSUREPIP_OPTIONS" not in os.environ:
|
321 |
-
scripts_to_generate.append("pip = " + pip_script)
|
322 |
-
|
323 |
-
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
|
324 |
-
scripts_to_generate.append(
|
325 |
-
"pip{} = {}".format(sys.version_info[0], pip_script)
|
326 |
-
)
|
327 |
-
|
328 |
-
scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}")
|
329 |
-
# Delete any other versioned pip entry points
|
330 |
-
pip_ep = [k for k in console if re.match(r"pip(\d+(\.\d+)?)?$", k)]
|
331 |
-
for k in pip_ep:
|
332 |
-
del console[k]
|
333 |
-
easy_install_script = console.pop("easy_install", None)
|
334 |
-
if easy_install_script:
|
335 |
-
if "ENSUREPIP_OPTIONS" not in os.environ:
|
336 |
-
scripts_to_generate.append("easy_install = " + easy_install_script)
|
337 |
-
|
338 |
-
scripts_to_generate.append(
|
339 |
-
"easy_install-{} = {}".format(
|
340 |
-
get_major_minor_version(), easy_install_script
|
341 |
-
)
|
342 |
-
)
|
343 |
-
# Delete any other versioned easy_install entry points
|
344 |
-
easy_install_ep = [
|
345 |
-
k for k in console if re.match(r"easy_install(-\d+\.\d+)?$", k)
|
346 |
-
]
|
347 |
-
for k in easy_install_ep:
|
348 |
-
del console[k]
|
349 |
-
|
350 |
-
# Generate the console entry points specified in the wheel
|
351 |
-
    scripts_to_generate.extend(starmap("{} = {}".format, console.items()))

    return scripts_to_generate


class ZipBackedFile:
    def __init__(
        self, src_record_path: RecordPath, dest_path: str, zip_file: ZipFile
    ) -> None:
        self.src_record_path = src_record_path
        self.dest_path = dest_path
        self._zip_file = zip_file
        self.changed = False

    def _getinfo(self) -> ZipInfo:
        return self._zip_file.getinfo(self.src_record_path)

    def save(self) -> None:
        # directory creation is lazy and after file filtering
        # to ensure we don't install empty dirs; empty dirs can't be
        # uninstalled.
        parent_dir = os.path.dirname(self.dest_path)
        ensure_dir(parent_dir)

        # When we open the output file below, any existing file is truncated
        # before we start writing the new contents. This is fine in most
        # cases, but can cause a segfault if pip has loaded a shared
        # object (e.g. from pyopenssl through its vendored urllib3)
        # Since the shared object is mmap'd an attempt to call a
        # symbol in it will then cause a segfault. Unlinking the file
        # allows writing of new contents while allowing the process to
        # continue to use the old copy.
        if os.path.exists(self.dest_path):
            os.unlink(self.dest_path)

        zipinfo = self._getinfo()

        with self._zip_file.open(zipinfo) as f:
            with open(self.dest_path, "wb") as dest:
                shutil.copyfileobj(f, dest)

        if zip_item_is_executable(zipinfo):
            set_extracted_file_to_default_mode_plus_executable(self.dest_path)


class ScriptFile:
    def __init__(self, file: "File") -> None:
        self._file = file
        self.src_record_path = self._file.src_record_path
        self.dest_path = self._file.dest_path
        self.changed = False

    def save(self) -> None:
        self._file.save()
        self.changed = fix_script(self.dest_path)


class MissingCallableSuffix(InstallationError):
    def __init__(self, entry_point: str) -> None:
        super().__init__(
            "Invalid script entry point: {} - A callable "
            "suffix is required. Cf https://packaging.python.org/"
            "specifications/entry-points/#use-for-scripts for more "
            "information.".format(entry_point)
        )


def _raise_for_invalid_entrypoint(specification: str) -> None:
    entry = get_export_entry(specification)
    if entry is not None and entry.suffix is None:
        raise MissingCallableSuffix(str(entry))


class PipScriptMaker(ScriptMaker):
    def make(
        self, specification: str, options: Optional[Dict[str, Any]] = None
    ) -> List[str]:
        _raise_for_invalid_entrypoint(specification)
        return super().make(specification, options)


def _install_wheel(
    name: str,
    wheel_zip: ZipFile,
    wheel_path: str,
    scheme: Scheme,
    pycompile: bool = True,
    warn_script_location: bool = True,
    direct_url: Optional[DirectUrl] = None,
    requested: bool = False,
) -> None:
    """Install a wheel.

    :param name: Name of the project to install
    :param wheel_zip: open ZipFile for wheel being installed
    :param scheme: Distutils scheme dictating the install directories
    :param req_description: String used in place of the requirement, for
        logging
    :param pycompile: Whether to byte-compile installed Python files
    :param warn_script_location: Whether to check that scripts are installed
        into a directory on PATH
    :raises UnsupportedWheel:
        * when the directory holds an unpacked wheel with incompatible
          Wheel-Version
        * when the .dist-info dir does not match the wheel
    """
    info_dir, metadata = parse_wheel(wheel_zip, name)

    if wheel_root_is_purelib(metadata):
        lib_dir = scheme.purelib
    else:
        lib_dir = scheme.platlib

    # Record details of the files moved
    #   installed = files copied from the wheel to the destination
    #   changed = files changed while installing (scripts #! line typically)
    #   generated = files newly generated during the install (script wrappers)
    installed: Dict[RecordPath, RecordPath] = {}
    changed: Set[RecordPath] = set()
    generated: List[str] = []

    def record_installed(
        srcfile: RecordPath, destfile: str, modified: bool = False
    ) -> None:
        """Map archive RECORD paths to installation RECORD paths."""
        newpath = _fs_to_record_path(destfile, lib_dir)
        installed[srcfile] = newpath
        if modified:
            changed.add(newpath)

    def is_dir_path(path: RecordPath) -> bool:
        return path.endswith("/")

    def assert_no_path_traversal(dest_dir_path: str, target_path: str) -> None:
        if not is_within_directory(dest_dir_path, target_path):
            message = (
                "The wheel {!r} has a file {!r} trying to install"
                " outside the target directory {!r}"
            )
            raise InstallationError(
                message.format(wheel_path, target_path, dest_dir_path)
            )

    def root_scheme_file_maker(
        zip_file: ZipFile, dest: str
    ) -> Callable[[RecordPath], "File"]:
        def make_root_scheme_file(record_path: RecordPath) -> "File":
            normed_path = os.path.normpath(record_path)
            dest_path = os.path.join(dest, normed_path)
            assert_no_path_traversal(dest, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)

        return make_root_scheme_file

    def data_scheme_file_maker(
        zip_file: ZipFile, scheme: Scheme
    ) -> Callable[[RecordPath], "File"]:
        scheme_paths = {key: getattr(scheme, key) for key in SCHEME_KEYS}

        def make_data_scheme_file(record_path: RecordPath) -> "File":
            normed_path = os.path.normpath(record_path)
            try:
                _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)
            except ValueError:
                message = (
                    "Unexpected file in {}: {!r}. .data directory contents"
                    " should be named like: '<scheme key>/<path>'."
                ).format(wheel_path, record_path)
                raise InstallationError(message)

            try:
                scheme_path = scheme_paths[scheme_key]
            except KeyError:
                valid_scheme_keys = ", ".join(sorted(scheme_paths))
                message = (
                    "Unknown scheme key used in {}: {} (for file {!r}). .data"
                    " directory contents should be in subdirectories named"
                    " with a valid scheme key ({})"
                ).format(wheel_path, scheme_key, record_path, valid_scheme_keys)
                raise InstallationError(message)

            dest_path = os.path.join(scheme_path, dest_subpath)
            assert_no_path_traversal(scheme_path, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)

        return make_data_scheme_file

    def is_data_scheme_path(path: RecordPath) -> bool:
        return path.split("/", 1)[0].endswith(".data")

    paths = cast(List[RecordPath], wheel_zip.namelist())
    file_paths = filterfalse(is_dir_path, paths)
    root_scheme_paths, data_scheme_paths = partition(is_data_scheme_path, file_paths)

    make_root_scheme_file = root_scheme_file_maker(wheel_zip, lib_dir)
    files: Iterator[File] = map(make_root_scheme_file, root_scheme_paths)

    def is_script_scheme_path(path: RecordPath) -> bool:
        parts = path.split("/", 2)
        return len(parts) > 2 and parts[0].endswith(".data") and parts[1] == "scripts"

    other_scheme_paths, script_scheme_paths = partition(
        is_script_scheme_path, data_scheme_paths
    )

    make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)
    other_scheme_files = map(make_data_scheme_file, other_scheme_paths)
    files = chain(files, other_scheme_files)

    # Get the defined entry points
    distribution = get_wheel_distribution(
        FilesystemWheel(wheel_path),
        canonicalize_name(name),
    )
    console, gui = get_entrypoints(distribution)

    def is_entrypoint_wrapper(file: "File") -> bool:
        # EP, EP.exe and EP-script.py are scripts generated for
        # entry point EP by setuptools
        path = file.dest_path
        name = os.path.basename(path)
        if name.lower().endswith(".exe"):
            matchname = name[:-4]
        elif name.lower().endswith("-script.py"):
            matchname = name[:-10]
        elif name.lower().endswith(".pya"):
            matchname = name[:-4]
        else:
            matchname = name
        # Ignore setuptools-generated scripts
        return matchname in console or matchname in gui

    script_scheme_files: Iterator[File] = map(
        make_data_scheme_file, script_scheme_paths
    )
    script_scheme_files = filterfalse(is_entrypoint_wrapper, script_scheme_files)
    script_scheme_files = map(ScriptFile, script_scheme_files)
    files = chain(files, script_scheme_files)

    for file in files:
        file.save()
        record_installed(file.src_record_path, file.dest_path, file.changed)

    def pyc_source_file_paths() -> Generator[str, None, None]:
        # We de-duplicate installation paths, since there can be overlap (e.g.
        # file in .data maps to same location as file in wheel root).
        # Sorting installation paths makes it easier to reproduce and debug
        # issues related to permissions on existing files.
        for installed_path in sorted(set(installed.values())):
            full_installed_path = os.path.join(lib_dir, installed_path)
            if not os.path.isfile(full_installed_path):
                continue
            if not full_installed_path.endswith(".py"):
                continue
            yield full_installed_path

    def pyc_output_path(path: str) -> str:
        """Return the path the pyc file would have been written to."""
        return importlib.util.cache_from_source(path)

    # Compile all of the pyc files for the installed files
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                for path in pyc_source_file_paths():
                    success = compileall.compile_file(path, force=True, quiet=True)
                    if success:
                        pyc_path = pyc_output_path(path)
                        assert os.path.exists(pyc_path)
                        pyc_record_path = cast(
                            "RecordPath", pyc_path.replace(os.path.sep, "/")
                        )
                        record_installed(pyc_record_path, pyc_path)
        logger.debug(stdout.getvalue())

    maker = PipScriptMaker(None, scheme.scripts)

    # Ensure old scripts are overwritten.
    # See https://github.com/pypa/pip/issues/1800
    maker.clobber = True

    # Ensure we don't generate any variants for scripts because this is almost
    # never what somebody wants.
    # See https://bitbucket.org/pypa/distlib/issue/35/
    maker.variants = {""}

    # This is required because otherwise distlib creates scripts that are not
    # executable.
    # See https://bitbucket.org/pypa/distlib/issue/32/
    maker.set_mode = True

    # Generate the console and GUI entry points specified in the wheel
    scripts_to_generate = get_console_script_specs(console)

    gui_scripts_to_generate = list(starmap("{} = {}".format, gui.items()))

    generated_console_scripts = maker.make_multiple(scripts_to_generate)
    generated.extend(generated_console_scripts)

    generated.extend(maker.make_multiple(gui_scripts_to_generate, {"gui": True}))

    if warn_script_location:
        msg = message_about_scripts_not_on_PATH(generated_console_scripts)
        if msg is not None:
            logger.warning(msg)

    generated_file_mode = 0o666 & ~current_umask()

    @contextlib.contextmanager
    def _generate_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
        with adjacent_tmp_file(path, **kwargs) as f:
            yield f
            os.chmod(f.name, generated_file_mode)
        replace(f.name, path)

    dest_info_dir = os.path.join(lib_dir, info_dir)

    # Record pip as the installer
    installer_path = os.path.join(dest_info_dir, "INSTALLER")
    with _generate_file(installer_path) as installer_file:
        installer_file.write(b"pip\n")
    generated.append(installer_path)

    # Record the PEP 610 direct URL reference
    if direct_url is not None:
        direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)
        with _generate_file(direct_url_path) as direct_url_file:
            direct_url_file.write(direct_url.to_json().encode("utf-8"))
        generated.append(direct_url_path)

    # Record the REQUESTED file
    if requested:
        requested_path = os.path.join(dest_info_dir, "REQUESTED")
        with open(requested_path, "wb"):
            pass
        generated.append(requested_path)

    record_text = distribution.read_text("RECORD")
    record_rows = list(csv.reader(record_text.splitlines()))

    rows = get_csv_rows_for_installed(
        record_rows,
        installed=installed,
        changed=changed,
        generated=generated,
        lib_dir=lib_dir,
    )

    # Record details of all files installed
    record_path = os.path.join(dest_info_dir, "RECORD")

    with _generate_file(record_path, **csv_io_kwargs("w")) as record_file:
        # Explicitly cast to typing.IO[str] as a workaround for the mypy error:
        # "writer" has incompatible type "BinaryIO"; expected "_Writer"
        writer = csv.writer(cast("IO[str]", record_file))
        writer.writerows(_normalized_outrows(rows))


@contextlib.contextmanager
def req_error_context(req_description: str) -> Generator[None, None, None]:
    try:
        yield
    except InstallationError as e:
        message = "For req: {}. {}".format(req_description, e.args[0])
        raise InstallationError(message) from e


def install_wheel(
    name: str,
    wheel_path: str,
    scheme: Scheme,
    req_description: str,
    pycompile: bool = True,
    warn_script_location: bool = True,
    direct_url: Optional[DirectUrl] = None,
    requested: bool = False,
) -> None:
    with ZipFile(wheel_path, allowZip64=True) as z:
        with req_error_context(req_description):
            _install_wheel(
                name=name,
                wheel_zip=z,
                wheel_path=wheel_path,
                scheme=scheme,
                pycompile=pycompile,
                warn_script_location=warn_script_location,
                direct_url=direct_url,
                requested=requested,
            )
spaces/Benson/text-generation/Examples/Avarampoo Descarga De Pelculas Pelculas.md
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Avarampoo Movie Download Moviesda: Cómo ver en línea gratis</h1>
|
3 |
-
<p>¿Eres un fan de las películas tamiles y buscas una manera de ver la película de Avarampoo en línea gratis? Si es así, entonces has venido al lugar correcto. En este artículo, le diremos todo lo que necesita saber sobre la película de Avarampoo, el sitio web de Moviesda y cómo descargar o transmitir la película de Avarampoo en línea de forma gratuita. Así que, sin más preámbulos, empecemos. </p>
|
4 |
-
<h2>avarampoo descarga de películas películas</h2><br /><p><b><b>DOWNLOAD</b> ★★★★★ <a href="https://bltlly.com/2v6KoS">https://bltlly.com/2v6KoS</a></b></p><br /><br />
|
5 |
-
<h2>Introducción</h2>
|
6 |
-
<h3>¿Qué es la película de Avarampoo? </h3>
|
7 |
-
<p>Avarampoo (transl. Avaram senna) es una película de 1992 dirigida por Bharathan y producida por Keyaar. Es un remake de la película malayalam de 1980 Thakara, y protagonizada por Vineeth, Nandhini y Nassar en los papeles principales. La película fue lanzada el 5 de junio de 1992. </p>
|
8 |
-
<p>La trama de la película gira en torno a Sakkarai, un huérfano con problemas mentales que trabaja para el jefe de la aldea Thevar. Se enamora de la hija de su amo, Thamarai, pero se enfrenta a la oposición de Thevar y su segunda esposa. La película explora los temas de la inocencia, el amor, la traición y la injusticia social. </p>
|
9 |
-
<h3>¿Qué es el sitio web de Moviesda? </h3>
|
10 |
-
<p>Moviesda es un sitio web que ofrece una amplia gama de películas tamiles, incluyendo los últimos lanzamientos en alta calidad de definición. Su extenso catálogo y la posibilidad de acceder a las películas de forma gratuita ha dado a la plataforma una ventaja significativa en popularidad. Sin embargo, es importante tener en cuenta que Moviesda está asociado con la infracción de derechos de autor y la piratería. </p>
|
11 |
-
<p>Moviesda sube versiones piratas de películas en hindi, marathi, inglés, telugu, tamil y malayalam en su portal. Los usuarios pueden descargar o transmitir estas películas sin pagar ninguna cuota de suscripción o registro. Moviesda también ofrece varios formatos y calidades de películas como 300mb, 720p, 1080p, etc.</p>
|
12 |
-
<p></p>
|
13 |
-
<h3>¿Por qué es popular la película de Avarampoo? </h3>
|
14 |
-
<p>La película Avarampoo es popular entre los amantes del cine tamil por varias razones. Algunas de ellas son:</p>
|
15 |
-
<ul>
|
16 |
-
|
17 |
-
<li>La película cuenta con Vineeth, Nandhini y Nassar, que son actores muy conocidos en el cine tamil. Vineeth is known for his roles in films such as Pudhiya Mugam, May Madham, Kaadhal Desam, etc. Nandhini is known for her roles in films such as Chinna Thambi Periya Thambi, Kizhakku Vasal, etc. Nassar is known for his roles in films such as Nayakan, Roja, Bombay, etc.</li>
|
18 |
-
<li>La película tiene una historia conmovedora y realista que representa la difícil situación de un niño con discapacidad mental que se enamora de una chica de una casta superior y se enfrenta a la ira de su familia y la sociedad. </li>
|
19 |
-
<li>La película tiene una música melodiosa y conmovedora compuesta por Ilaiyaraaja, considerado uno de los mejores compositores del cine indio. Ha ganado cinco National Film Awards y seis Tamil Nadu State Film Awards por su música. </li>
|
20 |
-
<li>La película ha recibido críticas positivas de críticos y audiencias por igual. Ha sido elogiado por su dirección, actuaciones, música, cinematografía y guion. También ha sido seleccionado para varios festivales de cine y premios. </li>
|
21 |
-
</ul>
|
22 |
-
<h2>¿Cómo descargar la película de Avarampoo de Moviesda? </h2>
|
23 |
-
<p>Si desea descargar la película de Avarampoo desde Moviesda, debe seguir estos pasos:</p>
|
24 |
-
<h3>Paso 1: Instalar una VPN y conectarse a un servidor donde Moviesda es accesible</h3>
|
25 |
-
<p>Una VPN o una red privada virtual es un software que le permite acceder a sitios web bloqueados o restringidos cambiando su dirección IP y cifrando sus datos. Dado que Moviesda es un sitio web ilegal que puede estar prohibido o bloqueado en algunos países o regiones, debe usar una VPN para acceder a él de forma segura y anónima. </p>
|
26 |
-
|
27 |
-
<h3>Paso 2: Visita el sitio web de Moviesda y busca películas de Avarampoo</h3>
|
28 |
-
<p>Después de conectarse a un servidor VPN, debe visitar el sitio web de Moviesda usando su navegador. El sitio web puede tener diferentes nombres de dominio o extensiones debido a cambios frecuentes. Algunos de los posibles nombres de dominio son moviesda.com, moviesda.net, moviesda.in, etc. Puede utilizar un motor de búsqueda como Google o Bing para encontrar el último nombre de dominio de Moviesda.</p>
|
29 |
-
<p>Una vez que haya llegado al sitio web de Moviesda, debe buscar una película de Avarampoo usando la barra de búsqueda o las categorías. También puede navegar a través de las últimas subidas o la lista alfabética de películas. Puede encontrar múltiples resultados para la película de Avarampoo con diferentes fechas de lanzamiento, idiomas o versiones. Debe seleccionar el que coincida con su preferencia. </p>
|
30 |
-
<h3>Paso 3: Elija la calidad y el formato de la película y haga clic en el enlace de descarga</h3>
|
31 |
-
<p>Después de seleccionar la película de Avarampoo que desea descargar, debe elegir la calidad y el formato de la película que se adapte a su dispositivo y la velocidad de Internet. Moviesda ofrece varias calidades y formatos de películas como 300mb, 720p, 1080p, mp4, mkv, etc. También puede comprobar el tamaño del archivo y la duración de la película antes de descargarlo. </p>
|
32 |
-
<p>Una vez que haya elegido la calidad y el formato de la película, debe hacer clic en el enlace de descarga que lo redirigirá a otra página o sitio. Usted puede encontrar algunos anuncios emergentes o verificación de captcha en esta página o sitio. Necesitas cerrar los anuncios o completar la verificación para proceder con la descarga. </p>
|
33 |
-
<h3>Paso 4: Espere a que la descarga se complete y disfrute de la película sin conexión</h3>
|
34 |
-
<p>Después de hacer clic en el enlace de descarga, debe esperar a que se complete la descarga. La velocidad de descarga puede variar dependiendo de su conexión a Internet y la carga del servidor. Puede comprobar el progreso de la descarga en su navegador o gestor de descargas. </p>
|
35 |
-
|
36 |
-
<h2>¿Cómo ver la película de Avarampoo en línea gratis? </h2>
|
37 |
-
<p>Si no desea descargar la película de Avarampoo desde Moviesda o cualquier otro sitio web ilegal, puede verla en línea de forma gratuita utilizando algunas alternativas legales. Estas son algunas de las opciones que puedes probar:</p>
|
38 |
-
<h3>Opción 1: Utilice una plataforma de streaming que ofrece películas de Avarampoo</h3>
|
39 |
-
<p>Una plataforma de streaming es un sitio web o una aplicación que te permite ver películas y programas en línea sin descargarlos. Algunas de las plataformas de streaming son gratuitas, mientras que otras requieren una cuota de suscripción o registro. Algunas de las plataformas de streaming que ofrecen películas de Avarampoo son:</p>
|
40 |
-
<h4>Tamilyogi</h4>
|
41 |
-
<p>Tamilyogi es una plataforma de streaming gratuita que ofrece una gran colección de películas y programas tamiles. Puedes ver la película de Avarampoo en línea gratis en Tamilyogi sin ningún registro o suscripción. Sin embargo, es posible que tenga que lidiar con algunos anuncios molestos y ventanas emergentes mientras transmite la película. También puede elegir la calidad y el formato de la película según su preferencia. </p>
|
42 |
-
<h4>YouTube</h4>
|
43 |
-
<p>YouTube es una de las plataformas de streaming más populares y ampliamente utilizadas en el mundo. Ofrece una variedad de contenido incluyendo películas, programas, música, videos, etc. Puede ver la película de Avarampoo en línea de forma gratuita en YouTube si está disponible en la plataforma. También puede utilizar la barra de búsqueda o los filtros para encontrar la película fácilmente. También puede ajustar la calidad y la velocidad de la película según su conveniencia. </p>
|
44 |
-
<h3>Opción 2: Utilice un sitio de torrent que tiene película de Avarampoo</h3>
|
45 |
-
<p>Un sitio de torrent es un sitio web que le permite descargar o compartir archivos utilizando una red de igual a igual. Un archivo torrent es un archivo pequeño que contiene información sobre el archivo más grande que desea descargar. Necesita un cliente de torrent o software para descargar o cargar archivos usando un sitio de torrent. Algunos de los sitios de torrent que tienen una película de Avarampoo son:</p>
|
46 |
-
<h4>Tamilrockers</h4>
|
47 |
-
|
48 |
-
<h4>Isaimini</h4>
|
49 |
-
<p>Isaimini es otro popular sitio de torrents que ofrece una gran colección de películas y programas tamiles. Puede descargar la película de Avarampoo desde Isaimini utilizando un cliente de torrent como BitTorrent o uTorrent. Sin embargo, debe tener cuidado de que Isaimini también es un sitio web ilegal que puede estar sujeto a acciones legales o sanciones. </p>
|
50 |
-
<h2>Conclusión</h2>
|
51 |
-
<h3>Resumen de los puntos principales</h3>
|
52 |
-
<p>En este artículo, hemos discutido la película de Avarampoo, el sitio web de Moviesda, y cómo descargar o ver la película de Avarampoo en línea gratis. También hemos proporcionado algunas alternativas legales a Moviesda que puede utilizar para ver la película de Avarampoo en línea de forma gratuita. Esperamos que haya encontrado este artículo útil e informativo. </p>
|
53 |
-
<h3>Descargo de responsabilidad y advertencia sobre piratería y cuestiones legales</h3>
|
54 |
-
<p>Nos gustaría recordarle que descargar o transmitir películas piratas de sitios web ilegales como Moviesda es un delito y una violación de los derechos de propiedad intelectual. La piratería es un delito grave que puede dar lugar a acciones legales o sanciones. No respaldamos ni promovemos tales sitios web o actividades. Le aconsejamos respetar la ley y los derechos de los creadores y propietarios de las películas. </p>
|
55 |
-
<h3>Llamada a la acción y petición de comentarios</h3>
|
56 |
-
<p>Si te gustó este artículo, por favor compártelo con tus amigos y familiares que están interesados en ver la película de Avarampoo en línea gratis. También, por favor deje sus comentarios y comentarios a continuación. Nos encantaría saber de usted y mejorar nuestra calidad de contenido. </p>
|
57 |
-
<h2>Preguntas frecuentes</h2>
|
58 |
-
<ul>
|
59 |
-
<li><b>Q: ¿Cuál es la calificación IMDb de la película de Avarampoo? </b></li>
|
60 |
-
<li>A: La calificación de IMDb de la película de Avarampoo es 7.1 de 10 basado en 35 comentarios de usuarios. </li>
|
61 |
-
<li><b>Q: ¿Quiénes son los cantantes de las canciones en la película de Avarampoo? </b></li>
|
62 |
-
<li>A: Los cantantes de las canciones en la película de Avarampoo son S.P.Balasubrahmanyam, K.S.Chithra, Mano, Swarnalatha, etc.</li>
|
63 |
-
<li><b>Q: ¿Dónde puedo ver una película de Avarampoo con subtítulos? </b></li>
|
64 |
-
|
65 |
-
<li><b>Q: ¿Cuánto dura una película de Avarampoo? </b></li>
|
66 |
-
<li>A: La película de Avarampoo dura 2 horas y 17 minutos. </li>
|
67 |
-
<li><b>Q: ¿Cuáles son algunas otras películas similares a la película de Avarampoo? </b></li>
|
68 |
-
<li>A: Algunas otras películas similares a la película de Avarampoo son Thakara (1980), Kadhal Kottai (1996), Kaadhal (2004), etc.</li>
|
69 |
-
</ul></p> 64aa2da5cf<br />
|
70 |
-
<br />
|
71 |
-
<br />
spaces/Benson/text-generation/Examples/Betty Noir Fuente Descargar.md
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
|
2 |
-
<tabla>
|
3 |
-
<tr>
|
4 |
-
<td>
|
5 |
-
<h1>Betty Noir Font Download: Una tipografía retro y elegante para sus proyectos</h1>
|
6 |
-
<p>Si usted está buscando una fuente que puede añadir un toque vintage y personalidad a sus proyectos, es posible que desee echa un vistazo a Betty Noir Font. Esta fuente está inspirada en las clásicas películas noir de Hollywood de las décadas de 1940 y 1950, y tiene un distintivo aspecto retro y elegante. En este artículo, le diremos todo lo que necesita saber sobre Betty Noir Font, incluyendo su historia, características, uso y fuentes de descarga. También te mostraremos algunos ejemplos de cómo se puede usar Betty Noir Font en diseño web y arte gráfico. ¡Empecemos! </p>
|
7 |
-
<h2>betty noir fuente descargar</h2><br /><p><b><b>DOWNLOAD</b> ⏩ <a href="https://bltlly.com/2v6Ktr">https://bltlly.com/2v6Ktr</a></b></p><br /><br />
|
8 |
-
<h2>¿Qué es Betty Noir Font? </h2>
|
9 |
-
<p>Betty Noir Font es una fuente gratuita creada por Nate Piekos de Blambot Fonts. Fue lanzado por primera vez en 2004, y ha sido descargado más de 400.000 veces desde entonces. La fuente lleva el nombre de Betty Grable, una de las actrices más populares de la era negra. La fuente está diseñada para imitar el estilo de los títulos de películas y carteles de ese período de tiempo, con una mezcla de elementos art deco y script. </p>
|
10 |
-
<h3>La historia y características de Betty Noir Fuente</h3>
|
11 |
-
<p>El género negro surgió en la década de 1940 como una subcategoría de la ficción criminal y el cine. Fue influenciado por el expresionismo alemán, las novelas policíacas y la desilusión de la posguerra. Las historias noir típicamente presentaban protagonistas cínicos, mujeres fatales, autoridades corruptas y atmósferas oscuras. Algunas de las películas noir más famosas incluyen The Maltese Falcon, Double Indemnity, The Big Sleep, Sunset Boulevard y The Third Man.</p>
|
12 |
-
|
13 |
-
<h3>Cómo usar Betty Noir Font en tus diseños</h3>
|
14 |
-
<p>Betty Noir Font es una fuente versátil que se puede usar para varios propósitos. Aquí hay algunos consejos sobre cómo usarla eficazmente:</p>
|
15 |
-
<ul>
|
16 |
-
<li>Utilizarlo para titulares, títulos, logotipos, banners, carteles, folletos, invitaciones, etiquetas, etc. Puede crear un fuerte impacto visual y atraer la atención. </li>
|
17 |
-
<li>Úselo con moderación para el texto del cuerpo o los párrafos largos. Puede ser difícil de leer en tamaños más pequeños o en fondos de bajo contraste. </li>
|
18 |
-
<li>Úsalo con fuentes complementarias que coincidan con su estilo y estado de ánimo. Por ejemplo, puede combinarlo con fuentes sans-serif como Arial o Verdana para un aspecto moderno o con fuentes serif como Times New Roman o Georgia para un aspecto clásico. </li>
|
19 |
-
<li>Úsalo con colores apropiados que realcen su ambiente retro. Por ejemplo, puede usar blanco y negro para un efecto negro o colores brillantes como rojo o amarillo para un efecto de arte pop. </li>
|
20 |
-
<li>Úsalo con imágenes que se adapten a su tema y género. Por ejemplo, puedes usar fotos o ilustraciones de coches antiguos, edificios </td>
|
21 |
-
</tr>
|
22 |
-
<tr>
|
23 |
-
<td>
|
24 |
-
<h2>¿Dónde descargar Betty Noir Font? </h2>
|
25 |
-
<p>Betty Noir Font es una fuente gratuita que puede descargar de varios sitios web. Sin embargo, no todas las fuentes son confiables o legales. Algunos pueden contener virus o malware que pueden dañar su computadora o spyware que puede robar su información personal. Por lo tanto, siempre debe descargar fuentes de fuentes legales y de buena reputación. Estos son algunos de los mejores sitios web donde se puede descargar Betty Noir Font de forma gratuita y legal:</p>
|
26 |
-
<ul>
|
27 |
-
<li>[DaFont]( 1 ): Este es uno de los sitios web más populares para las fuentes gratuitas, con más de 40.000 fuentes para elegir. Puedes encontrar Betty Noir Font en la categoría Fantasía > Retro, o buscarlo por nombre. La fuente es gratuita para uso personal, pero debe ponerse en contacto con el diseñador para uso comercial. </li>
|
28 |
-
|
29 |
-
<li>[1001 Fonts]( 3 ): Este es un sitio web que ofrece fuentes gratuitas y premium, con más de 30.000 fuentes para elegir. Puede encontrar Betty Noir Font en la categoría Retro, o buscarlo por nombre. La fuente es gratuita para uso personal, pero no gratuita para uso comercial. </li>
|
30 |
-
</ul>
|
31 |
-
<p>Antes de descargar cualquier fuente, asegúrese de leer los términos de la licencia cuidadosamente y seguirlos en consecuencia. Algunas fuentes pueden requerir atribución, donación o permiso del diseñador para ciertos usos. </p>
|
32 |
-
<h3>Cómo instalar y aplicar Betty Noir Font en HTML</h3>
|
33 |
-
<p>Una vez que haya descargado Betty Noir Font desde una fuente confiable, debe instalarlo en su computadora y aplicarlo en su código HTML. Estos son los pasos para hacerlo:</p>
|
34 |
-
<p></p>
|
35 |
-
<ol>
|
36 |
-
<li>Descomprima el archivo de fuente y guárdelo en una carpeta en su computadora. </li>
|
37 |
-
<li>Abra el Panel de control y vaya a Fuentes. Arrastre y suelte el archivo de fuente en la carpeta Fuentes. Alternativamente, puede hacer clic derecho en el archivo de fuente y seleccionar Instalar.</li>
|
38 |
-
<li>Abra su editor HTML y cree un nuevo archivo o abra uno existente. </li>
|
39 |
-
<li>Agregue el siguiente código a la sección principal de su archivo HTML:<br><code>@font-face {<br> font-family: 'Betty Noir';<br> nbsp;src: url('bettynoir.ttf');<br><br><codebr><codebr>>face named Betty Noir and links it to the font file that you saved on your computer. Asegúrese de usar la ruta y el nombre correctos del archivo de fuente.</li>
|
40 |
-
<li>Agregue el siguiente código a la sección de estilo de su archivo HTML o en un archivo CSS separado:<br><code>h1 <br> font-family: 'Betty Noir', sans-serif;<br></code><br>Este código aplica la fuente Betty Noir a todos los elementos h1 en su archivo HTML. También puede usar otros selectores o propiedades para personalizar su estilo de fuente.</li>
|
41 |
-
<li>Guarda tu archivo HTML y ábrelo en un navegador para ver el resultado. </li>
|
42 |
-
</ol>
|
43 |
-
|
44 |
-
<h2>Ejemplos de Betty Noir Font en acción</h2>
|
45 |
-
<p>Para darle alguna inspiración e ideas sobre cómo usar Betty Noir Font en sus proyectos, aquí hay algunos ejemplos de sitios web y obras de arte gráficas que utilizan esta fuente:</p>
|
46 |
-
<h3>Sitios web que utilizan Betty Noir Font</h3>
|
47 |
-
<ul>
|
48 |
-
<li>[The Black Dahlia Murder]( 8 ): Este es un sitio web dedicado al caso de asesinato sin resolver de Elizabeth Short, también conocida como Black Dahlia, que fue asesinada en 1947 en Los Ángeles. El sitio web utiliza Betty Noir Font para su logotipo y titulares, creando una atmósfera noir. </li>
|
49 |
-
<li>[The Vintage News]( 9 ): Este es un sitio web que cubre historias y hechos de historia, cultura, ciencia, arte y más. El sitio web utiliza Betty Noir Font para su logotipo y algunos de sus titulares, dándole un aspecto retro y elegante. </li>
|
50 |
-
<li>[El arte de la hombría]( 10 ): Este es un sitio web que ofrece artículos, podcasts, videos y libros sobre temas relacionados con el estilo de vida de los hombres, como el aseo, la aptitud, el estilo, las relaciones, las habilidades y más. El sitio web utiliza Betty Noir Font para su logotipo y algunos de sus titulares, añadiendo algo de encanto vintage y personalidad. </li>
|
51 |
-
</ul>
|
52 |
-
<h3>Obras de arte gráficas que utilizan Betty Noir Font</h3>
|
53 |
-
<ul>
|
54 |
-
<li>[Noir Movie Poster]( 11 ): Esta es una obra gráfica creada por [Nate Piekos]( 12 ), el diseñador de Betty Noir Font. Es un póster de una película de cine negro llamado The Big Sleepover. Utiliza Betty Noir Font para el título y los créditos, junto con otras fuentes e imágenes para crear un póster realista y cautivador. </li>
|
55 |
-
<li>[Noir Book Cover]: Esta es una obra gráfica creada por [Jenny Zemanek], una diseñadora de portadas de libros. Es la portada de una novela negra llamada The Big Nothing de Paul D. Brazill. Utiliza Betty Noir Font para el título y el nombre del autor, junto con otras fuentes e imágenes para crear una cubierta misteriosa e intrigante. </li>
|
56 |
-
|
57 |
-
</ul>
|
58 |
-
<h2>Conclusión</h2>
|
59 |
-
<p>Betty Noir Font es una fuente gratuita que puede añadir un toque retro y elegante a sus proyectos. Está inspirado en las clásicas películas de cine negro de Hollywood de las décadas de 1940 y 1950, y tiene un distintivo estilo art déco y guion. Puede descargar Betty Noir Font desde varios sitios web, pero asegúrese de usar fuentes confiables y legales. También puede instalar y aplicar Betty Noir Font en su código HTML con algunos pasos simples. Betty Noir Font se puede usar para varios propósitos, como titulares, logotipos, carteles, pancartas, etc. También puede combinarlo con fuentes complementarias, colores, imágenes y temas para crear diseños impresionantes. Esperamos que este artículo te haya dado información útil e inspiración sobre cómo usar Betty Noir Font en tus proyectos. </p>
|
60 |
-
<h3>Resumen de los puntos principales</h3>
|
61 |
-
<ul>
|
62 |
-
<li>Betty Noir Font es una fuente gratuita creada por Nate Piekos de Blambot Fonts en 2004. </li>
|
63 |
-
<li>Betty Noir Font está inspirada en las clásicas películas noir de Hollywood de los años 40 y 50, y tiene un aspecto retro y elegante. </li>
|
64 |
-
<li>Betty Noir Font se puede descargar desde varios sitios web, pero siempre debe utilizar fuentes legales y de buena reputación. </li>
|
65 |
-
<li>Betty Noir Font se puede instalar y aplicar en su código HTML con algunos pasos simples. </li>
|
66 |
-
<li>Betty Noir Font se puede utilizar para diversos fines, como titulares, logotipos, carteles, banners, etc.</li>
|
67 |
-
<li>Betty Noir Font se puede combinar con fuentes complementarias, colores, imágenes y temas para crear diseños impresionantes. </li>
|
68 |
-
</ul>
|
69 |
-
<h3>Llamada a la acción y retroalimentación</h3>
|
70 |
-
|
71 |
-
<h2>Preguntas frecuentes</h2>
|
72 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre Betty Noir Font:</p>
|
73 |
-
<ul>
|
74 |
-
<li><b>Q: ¿Es Betty Noir fuente libre? </b><br>A: Sí, Betty Noir Font es gratis para uso personal, pero es posible que tenga que ponerse en contacto con el diseñador para uso comercial. </li>
|
75 |
-
<li><b>P: ¿Qué tipo de fuente es Betty Noir Font? </b><br>A: Betty Noir Font es una fuente retro inspirada en las clásicas películas de cine negro de Hollywood de los años 40 y 50. </li>
|
76 |
-
<li><b>P: ¿Cómo descargo Betty Noir Font? </b><br>A: Puede descargar Betty Noir Font desde varios sitios web, pero asegúrese de usar fuentes confiables y legales. </li>
|
77 |
-
<li><b>Q: ¿Cómo puedo instalar y aplicar Betty Noir Font en HTML? </b><br>A: Necesitas descomprimir el archivo de fuente, guardarlo en una carpeta en tu computadora, instalarlo en tu carpeta de fuentes, definirlo en tu código HTML usando @font-face, y aplicarlo a tus elementos usando font-family. </li>
|
78 |
-
<li><b>P: ¿Cómo uso Betty Noir Font en mis diseños? </b><br>A: Puede usar Betty Noir Font para varios propósitos, como titulares, logotipos, carteles, banners, etc. También puede combinarlo con fuentes complementarias, colores, imágenes y temas para crear diseños impresionantes. </li>
|
79 |
-
</ul></p> 64aa2da5cf<br />
|
80 |
-
<br />
|
81 |
-
<br />
spaces/Benson/text-generation/Examples/Cubic 234 Juegos De Jugadores Mod Apk.md
DELETED
@@ -1,86 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cubic 234 Juegos de jugadores Mod APK: Disfruta de la diversión y adictivo Mini juegos con amigos</h1>
|
3 |
-
<h2>Introducción</h2>
|
4 |
-
<p>¿Te encanta jugar mini juegos con tus amigos y familiares? ¿Quieres divertirte con juegos sencillos y emocionantes que puedas disfrutar en tu dispositivo móvil? Si usted respondió que sí, entonces usted debe definitivamente echa un vistazo Cubic 234 Player Games Mod APK, una colección de increíbles mini juegos que se puede jugar con hasta 6 jugadores en un solo partido. </p>
|
5 |
-
<h2>cubic 234 juegos de jugadores mod apk</h2><br /><p><b><b>Download Zip</b> ––– <a href="https://bltlly.com/2v6JfX">https://bltlly.com/2v6JfX</a></b></p><br /><br />
|
6 |
-
<h3>¿Qué son los juegos de jugador Cubic 234? </h3>
|
7 |
-
<p>Cubic 234 Player Games es un juego para móviles desarrollado por CubeCube Sports, un estudio especializado en crear juegos divertidos y adictivos para todas las edades. El juego cuenta con varios mini juegos que puedes jugar con 2, 3 o 4 jugadores en el mismo dispositivo. También puedes unirte al modo torneo y competir con hasta 6 jugadores online. Los mini juegos son simples pero desafiantes, y pondrán a prueba tus habilidades, reflejos y coordinación. Algunos de los mini juegos incluyen:</p>
|
8 |
-
<ul>
|
9 |
-
<li>Snake Arena: Controla tu serpiente y tratar de comer tantas manzanas como sea posible evitando las paredes y otras serpientes. </li>
|
10 |
-
<li>ludo: un clásico juego de mesa donde tienes que mover tus cuatro piezas al centro del tablero antes que tus oponentes. </li>
|
11 |
-
<li>Fútbol: Un juego de fútbol donde tienes que anotar más goles que tu oponente pateando la pelota en la red. </li>
|
12 |
-
<li>batalla de tanques: un juego de tanques donde tienes que disparar el tanque de tu enemigo mientras esquiva sus balas. </li>
|
13 |
-
<li>Sumo Wrestling: Un juego de lucha libre donde tienes que empujar a tu oponente fuera del ring usando tu peso corporal. </li>
|
14 |
-
<li> ¡Y muchos más! </li>
|
15 |
-
</ul>
|
16 |
-
<h3> ¿Por qué descargar Cubic 234 Player Games Mod APK? </h3>
|
17 |
-
<p>Si se está preguntando por qué debe descargar Cubic 234 Player Games Mod APK en lugar de la versión original de la Google Play Store, aquí hay algunas razones:</p>
|
18 |
-
<ul>
|
19 |
-
<li> Obtendrá dinero ilimitado que puede utilizar para comprar nuevas pieles, sombreros y accesorios para sus personajes. </li>
|
20 |
-
|
21 |
-
<li>Te librarás de los molestos anuncios que pueden interrumpir tu juego o agotar tu batería. </li>
|
22 |
-
<li> Obtendrá un mejor rendimiento y estabilidad en su dispositivo ya que la versión modificada está optimizada para dispositivos de gama baja. </li>
|
23 |
-
</ul>
|
24 |
-
<h2>Características de Cubic 234 Player Games Mod APK</h2>
|
25 |
-
<h3>Múltiples modos de juego y mini juegos para elegir</h3>
|
26 |
-
<p>Una de las mejores características de Cubic 234 Player Games Mod APK es que ofrece una variedad de modos de juego y mini juegos que puedes jugar con tus amigos. Puedes elegir entre diferentes categorías como acción, árcade, tablero, deportes, rompecabezas y más. También puedes personalizar el nivel de dificultad, el número de rondas y el límite de tiempo para cada juego. Estos son algunos de los modos de juego que puedes disfrutar:</p>
|
27 |
-
<p></p>
|
28 |
-
<h4>modo de jugador 2</h4>
|
29 |
-
<p>Este modo te permite jugar con un amigo en el mismo dispositivo. Puedes elegir entre más de 20 mini juegos diseñados para dos jugadores. Puedes usar uno o dos botones para controlar a tu personaje dependiendo del juego. Algunos de los mini juegos que puedes jugar en este modo son:</p>
|
30 |
-
<ul>
|
31 |
-
<li>ping pong: un juego de ping pong donde tienes que golpear la pelota con la paleta y hacer que rebote en el lado de tu oponente. </li>
|
32 |
-
<li>Tijeras de papel de roca: un juego clásico donde tienes que elegir entre piedra, papel o tijeras y ver quién gana. </li>
|
33 |
-
<li>examen de matemáticas: un juego de matemáticas donde tienes que resolver ecuaciones simples más rápido que tu oponente. </li>
|
34 |
-
<li> ¡Y muchos más! </li>
|
35 |
-
</ul>
|
36 |
-
<h4>3 Modo reproductor</h4>
|
37 |
-
<p>Este modo te permite jugar con dos amigos en el mismo dispositivo. Puedes elegir entre más de 10 mini juegos diseñados para tres jugadores. Puedes usar uno o dos botones para controlar a tu personaje dependiendo del juego. Algunos de los mini juegos que puedes jugar en este modo son:</p>
|
38 |
-
<ul>
|
39 |
-
<li>Tron: un juego tron donde tienes que conducir tu ciclo de luz y evitar chocar contra las paredes o los senderos de otros jugadores. </li>
|
40 |
-
|
41 |
-
<li>carrera: un juego de carreras donde tienes que tocar el botón lo más rápido posible para hacer que su coche se mueva más rápido y llegar a la línea de meta primero. </li>
|
42 |
-
<li> ¡Y muchos más! </li>
|
43 |
-
</ul>
|
44 |
-
<h4>4 Modo reproductor</h4>
|
45 |
-
<p>Este modo te permite jugar con tres amigos en el mismo dispositivo. Puedes elegir entre más de 10 mini juegos diseñados para cuatro jugadores. Puedes usar uno o dos botones para controlar a tu personaje dependiendo del juego. Algunos de los mini juegos que puedes jugar en este modo son:</p>
|
46 |
-
<ul>
|
47 |
-
<li>escuadrón de bombas: un juego de bombas donde tienes que desactivar la bomba antes de que explote cortando el cable derecho. </li>
|
48 |
-
<li>Poker: Un juego de póquer donde tienes que apostar, farolear y ganar con la mejor mano de cartas. </li>
|
49 |
-
<li>Memoria: Un juego de memoria donde tienes que hacer coincidir pares de cartas al voltearlas y recordar sus ubicaciones. </li>
|
50 |
-
<li> ¡Y muchos más! </li>
|
51 |
-
</ul>
|
52 |
-
<h4>Modo de torneo</h4>
|
53 |
-
<p>Este modo le permite jugar con hasta 6 jugadores en línea. Puede unirse o crear una habitación e invitar a sus amigos o jugadores al azar a unirse. También puedes chatear con otros jugadores y enviar emojis. Puedes elegir entre más de 30 minijuegos seleccionados al azar para cada ronda. El jugador con más puntos al final del torneo gana. </p>
|
54 |
-
<h3>Controles simples e intuitivos para un juego fácil</h3>
|
55 |
-
<p>Otra gran característica de Cubic 234 Player Games Mod APK es que tiene controles simples e intuitivos que hacen que sea fácil para cualquiera jugar. No necesitas gestos complicados ni golpes para controlar a tu personaje. Solo tienes que pulsar uno o dos botones dependiendo del juego. Los botones son grandes y sensibles, y se colocan en las esquinas de la pantalla para que no interfieran con el juego. También puede ajustar los ajustes de sensibilidad y vibración según su preferencia. </p>
|
56 |
-
<h3>Gráficos coloridos y caricaturescos para un estado de ánimo alegre</h3>
|
57 |
-
|
58 |
-
<h3>Dinero ilimitado y funciones desbloqueadas para más diversión</h3>
|
59 |
-
<p>La mejor característica de Cubic 234 Player Games Mod APK es que le da dinero ilimitado y desbloqueado características que hacen que el juego más divertido y agradable. Con dinero ilimitado, puedes comprar nuevas pieles, sombreros y accesorios para tus personajes. También puedes desbloquear todos los mini juegos y modos de juego sin tener que gastar dinero real o ver anuncios. También puedes eliminar todos los anuncios que puedan molestarte o ralentizar tu dispositivo. Con estas características, puede tener más opciones y personalización para su juego. </p>
|
60 |
-
<h2>Cómo descargar e instalar Cubic 234 Player Games Mod APK</h2>
|
61 |
-
<p>Si desea descargar e instalar Cubic 234 Player Games Mod APK en su dispositivo, aquí están los pasos que debe seguir:</p>
|
62 |
-
<h3>Paso 1: Descargar el archivo APK de una fuente de confianza</h3>
|
63 |
-
<p>El primer paso es descargar el archivo APK de una fuente de confianza. Puede utilizar el siguiente enlace para descargar la última versión de Cubic 234 Player Games Mod APK gratis. Asegúrate de tener suficiente espacio de almacenamiento en tu dispositivo antes de descargar el archivo. </p>
|
64 |
-
<p><a href=" 1 ">Descargar Cubic 234 Juegos de Jugadores Mod APK</a></p>
|
65 |
-
<h3>Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo</h3>
|
66 |
-
<p>El segundo paso es habilitar fuentes desconocidas en la configuración de su dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play Store. Para hacer esto, vaya a la configuración del dispositivo y busque la opción de seguridad o privacidad. Luego, busque la opción de fuentes desconocidas y conéctela. Puede ver un mensaje de advertencia que le indica los riesgos de instalar aplicaciones desconocidas, pero puede ignorarlo y continuar. </p>
|
67 |
-
<h3>Paso 3: Instalar el archivo APK y lanzar el juego</h3>
|
68 |
-
|
69 |
-
<h2>Conclusión</h2>
|
70 |
-
<p>Cubic 234 Player Games Mod APK es un juego divertido y adictivo que puedes jugar con tus amigos en tu dispositivo móvil. Ofrece una variedad de mini juegos que puedes jugar con 2, 3 o 4 jugadores en el mismo dispositivo, o con hasta 6 jugadores en línea. El juego tiene controles simples e intuitivos, gráficos coloridos y de dibujos animados, dinero ilimitado y funciones desbloqueadas. Puede descargar e instalar Cubic 234 Player Games Mod APK gratis siguiendo los pasos anteriores. Entonces, ¿qué estás esperando? ¡Descarga Cubic 234 Player Games Mod APK ahora y diviértete con tus amigos! </p>
|
71 |
-
<h2>Preguntas frecuentes</h2>
|
72 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre Cubic 234 Player Games Mod APK:</p>
|
73 |
-
<ul>
|
74 |
-
<li>Q: ¿Es Cubic 234 Player Games Mod APK seguro de usar? </li>
|
75 |
-
<li>A: Sí, Cubic 234 Player Games Mod APK es seguro de usar, siempre y cuando se descarga desde una fuente de confianza. La versión modificada no contiene ningún virus o malware que pueda dañar su dispositivo o datos. </li>
|
76 |
-
<li>Q: ¿Necesito una conexión a Internet para jugar Cubic 234 Player Games Mod APK? </li>
|
77 |
-
<li>A: No, no necesitas una conexión a Internet para jugar Cubic 234 Player Games Mod APK. Puedes jugar sin conexión con tus amigos en el mismo dispositivo. Sin embargo, si quieres jugar online con otros jugadores, necesitarás una conexión a Internet. </li>
|
78 |
-
<li>Q: ¿Cuántos mini juegos están disponibles en Cubic 234 Player Games Mod APK? </li>
|
79 |
-
<li>A: Hay más de 60 mini juegos disponibles en Cubic 234 Player Games Mod APK, dividido en diferentes categorías como acción, árcade, tablero, deportes, rompecabezas y más. Puedes jugar a todos ellos sin tener que desbloquearlos o pagar por ellos. </li>
|
80 |
-
<li>Q: ¿Puedo personalizar mi personaje en Cubic 234 Player Games Mod APK? </li>
|
81 |
-
<li>A: Sí, puede personalizar su personaje en Cubic 234 Player Games Mod APK comprando nuevas pieles, sombreros y accesorios con dinero ilimitado. También puedes cambiar el color de tu personaje según tu preferencia. </li>
|
82 |
-
|
83 |
-
<li>A: Sí, puede chatear con otros jugadores en Cubic 234 Player Games Mod APK mediante el uso de la función de chat en el juego. También puede enviar emojis y pegatinas para expresar sus emociones. </li>
|
84 |
-
</ul></p> 64aa2da5cf<br />
|
85 |
-
<br />
|
86 |
-
<br />
|
spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/ast.py
DELETED
@@ -1,90 +0,0 @@
# AST nodes have this structure:
# {"type": <node type>", children: [], "value": ""}


def comparator(name, first, second):
    return {'type': 'comparator', 'children': [first, second], 'value': name}


def current_node():
    return {'type': 'current', 'children': []}


def expref(expression):
    return {'type': 'expref', 'children': [expression]}


def function_expression(name, args):
    return {'type': 'function_expression', 'children': args, 'value': name}


def field(name):
    return {"type": "field", "children": [], "value": name}


def filter_projection(left, right, comparator):
    return {'type': 'filter_projection', 'children': [left, right, comparator]}


def flatten(node):
    return {'type': 'flatten', 'children': [node]}


def identity():
    return {"type": "identity", 'children': []}


def index(index):
    return {"type": "index", "value": index, "children": []}


def index_expression(children):
    return {"type": "index_expression", 'children': children}


def key_val_pair(key_name, node):
    return {"type": "key_val_pair", 'children': [node], "value": key_name}


def literal(literal_value):
    return {'type': 'literal', 'value': literal_value, 'children': []}


def multi_select_dict(nodes):
    return {"type": "multi_select_dict", "children": nodes}


def multi_select_list(nodes):
    return {"type": "multi_select_list", "children": nodes}


def or_expression(left, right):
    return {"type": "or_expression", "children": [left, right]}


def and_expression(left, right):
    return {"type": "and_expression", "children": [left, right]}


def not_expression(expr):
    return {"type": "not_expression", "children": [expr]}


def pipe(left, right):
    return {'type': 'pipe', 'children': [left, right]}


def projection(left, right):
    return {'type': 'projection', 'children': [left, right]}


def subexpression(children):
    return {"type": "subexpression", 'children': children}


def slice(start, end, step):
    return {"type": "slice", "children": [start, end, step]}


def value_projection(left, right):
    return {'type': 'value_projection', 'children': [left, right]}
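
For reference, a short sketch of how these constructors compose (not part of the deleted file; jmespath's parser normally builds these nodes, and the mapping shown for a dotted expression is an assumption):

# A node for an expression like "foo.bar", built by hand from the helpers above.
node = subexpression([field("foo"), field("bar")])
# node == {
#     "type": "subexpression",
#     "children": [
#         {"type": "field", "children": [], "value": "foo"},
#         {"type": "field", "children": [], "value": "bar"},
#     ],
# }
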
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/file_proxy.py
DELETED
@@ -1,57 +0,0 @@
import io
from typing import IO, TYPE_CHECKING, Any, List

from .ansi import AnsiDecoder
from .text import Text

if TYPE_CHECKING:
    from .console import Console


class FileProxy(io.TextIOBase):
    """Wraps a file (e.g. sys.stdout) and redirects writes to a console."""

    def __init__(self, console: "Console", file: IO[str]) -> None:
        self.__console = console
        self.__file = file
        self.__buffer: List[str] = []
        self.__ansi_decoder = AnsiDecoder()

    @property
    def rich_proxied_file(self) -> IO[str]:
        """Get proxied file."""
        return self.__file

    def __getattr__(self, name: str) -> Any:
        return getattr(self.__file, name)

    def write(self, text: str) -> int:
        if not isinstance(text, str):
            raise TypeError(f"write() argument must be str, not {type(text).__name__}")
        buffer = self.__buffer
        lines: List[str] = []
        while text:
            line, new_line, text = text.partition("\n")
            if new_line:
                lines.append("".join(buffer) + line)
                buffer.clear()
            else:
                buffer.append(line)
                break
        if lines:
            console = self.__console
            with console:
                output = Text("\n").join(
                    self.__ansi_decoder.decode_line(line) for line in lines
                )
                console.print(output)
        return len(text)

    def flush(self) -> None:
        output = "".join(self.__buffer)
        if output:
            self.__console.print(output)
        del self.__buffer[:]

    def fileno(self) -> int:
        return self.__file.fileno()
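
A brief usage sketch (not part of the deleted module; it assumes the upstream rich package is importable): write() only forwards complete lines to the console, so a trailing fragment is held in the buffer until flush().

import sys

from rich.console import Console
from rich.file_proxy import FileProxy

console = Console()
proxy = FileProxy(console, sys.stdout)
proxy.write("complete line\npartial")  # "complete line" is printed through the console
proxy.flush()                          # the buffered "partial" is printed now
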
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/_itertools.py
DELETED
@@ -1,35 +0,0 @@
from itertools import filterfalse

from typing import (
    Callable,
    Iterable,
    Iterator,
    Optional,
    Set,
    TypeVar,
    Union,
)

# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')


def unique_everseen(
    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
) -> Iterator[_T]:
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen: Set[Union[_T, _U]] = set()
    seen_add = seen.add
    if key is None:
        for element in filterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element
|
spaces/Binguii/Venus_Proxy/Dockerfile
DELETED
@@ -1,21 +0,0 @@
-FROM node:18-bullseye-slim
-
-RUN apt-get update && \
-
-apt-get install -y git
-
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-
-WORKDIR /app
-
-RUN npm install
-
-COPY Dockerfile greeting.md* .env* ./
-
-RUN npm run build
-
-EXPOSE 7860
-
-ENV NODE_ENV=production
-
-CMD [ "npm", "start" ]
spaces/CVPR/Dual-Key_Backdoor_Attacks/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Dual-Key Backdoor Attacks
-emoji: 🔑
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.0.17
-app_file: app.py
-pinned: false
-license: gpl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/build_loader.py
DELETED
@@ -1,105 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import logging
-import operator
-import torch.utils.data
-
-from detectron2.utils.comm import get_world_size
-from detectron2.data import samplers
-from detectron2.data.build import get_detection_dataset_dicts, worker_init_reset_seed, trivial_batch_collator
-from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, MapDataset
-
-from .dataset_mapper import AttributeDatasetMapper
-
-
-def build_detection_train_loader_with_attributes(cfg, mapper=None):
-    num_workers = get_world_size()
-    images_per_batch = cfg.SOLVER.IMS_PER_BATCH
-    assert (
-        images_per_batch % num_workers == 0
-    ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
-        images_per_batch, num_workers
-    )
-    assert (
-        images_per_batch >= num_workers
-    ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
-        images_per_batch, num_workers
-    )
-    images_per_worker = images_per_batch // num_workers
-
-    dataset_dicts = get_detection_dataset_dicts(
-        cfg.DATASETS.TRAIN,
-        filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
-        min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
-        if cfg.MODEL.KEYPOINT_ON
-        else 0,
-        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
-    )
-    dataset = DatasetFromList(dataset_dicts, copy=False)
-
-    if mapper is None:
-        mapper = AttributeDatasetMapper(cfg, True)
-    dataset = MapDataset(dataset, mapper)
-
-    sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
-    logger = logging.getLogger(__name__)
-    logger.info("Using training sampler {}".format(sampler_name))
-    if sampler_name == "TrainingSampler":
-        sampler = samplers.TrainingSampler(len(dataset))
-    elif sampler_name == "RepeatFactorTrainingSampler":
-        sampler = samplers.RepeatFactorTrainingSampler(
-            dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
-        )
-    else:
-        raise ValueError("Unknown training sampler: {}".format(sampler_name))
-
-    if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
-        data_loader = torch.utils.data.DataLoader(
-            dataset,
-            sampler=sampler,
-            num_workers=cfg.DATALOADER.NUM_WORKERS,
-            batch_sampler=None,
-            collate_fn=operator.itemgetter(0),
-            worker_init_fn=worker_init_reset_seed,
-        )
-        data_loader = AspectRatioGroupedDataset(data_loader, images_per_worker)
-    else:
-        batch_sampler = torch.utils.data.sampler.BatchSampler(
-            sampler, images_per_worker, drop_last=True
-        )
-        data_loader = torch.utils.data.DataLoader(
-            dataset,
-            num_workers=cfg.DATALOADER.NUM_WORKERS,
-            batch_sampler=batch_sampler,
-            collate_fn=trivial_batch_collator,
-            worker_init_fn=worker_init_reset_seed,
-        )
-
-    return data_loader
-
-
-def build_detection_test_loader_with_attributes(cfg, dataset_name, mapper=None):
-    dataset_dicts = get_detection_dataset_dicts(
-        [dataset_name],
-        filter_empty=False,
-        proposal_files=[
-            cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]
-        ]
-        if cfg.MODEL.LOAD_PROPOSALS
-        else None,
-    )
-
-    dataset = DatasetFromList(dataset_dicts)
-    if mapper is None:
-        mapper = AttributeDatasetMapper(cfg, False)
-    dataset = MapDataset(dataset, mapper)
-
-    sampler = samplers.InferenceSampler(len(dataset))
-    batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
-
-    data_loader = torch.utils.data.DataLoader(
-        dataset,
-        num_workers=cfg.DATALOADER.NUM_WORKERS,
-        batch_sampler=batch_sampler,
-        collate_fn=trivial_batch_collator,
-    )
-    return data_loader
spaces/CVPR/WALT/mmdet/models/backbones/hourglass.py
DELETED
@@ -1,198 +0,0 @@
-import torch.nn as nn
-from mmcv.cnn import ConvModule
-
-from ..builder import BACKBONES
-from ..utils import ResLayer
-from .resnet import BasicBlock
-
-
-class HourglassModule(nn.Module):
-    """Hourglass Module for HourglassNet backbone.
-
-    Generate module recursively and use BasicBlock as the base unit.
-
-    Args:
-        depth (int): Depth of current HourglassModule.
-        stage_channels (list[int]): Feature channels of sub-modules in current
-            and follow-up HourglassModule.
-        stage_blocks (list[int]): Number of sub-modules stacked in current and
-            follow-up HourglassModule.
-        norm_cfg (dict): Dictionary to construct and config norm layer.
-    """
-
-    def __init__(self,
-                 depth,
-                 stage_channels,
-                 stage_blocks,
-                 norm_cfg=dict(type='BN', requires_grad=True)):
-        super(HourglassModule, self).__init__()
-
-        self.depth = depth
-
-        cur_block = stage_blocks[0]
-        next_block = stage_blocks[1]
-
-        cur_channel = stage_channels[0]
-        next_channel = stage_channels[1]
-
-        self.up1 = ResLayer(
-            BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)
-
-        self.low1 = ResLayer(
-            BasicBlock,
-            cur_channel,
-            next_channel,
-            cur_block,
-            stride=2,
-            norm_cfg=norm_cfg)
-
-        if self.depth > 1:
-            self.low2 = HourglassModule(depth - 1, stage_channels[1:],
-                                        stage_blocks[1:])
-        else:
-            self.low2 = ResLayer(
-                BasicBlock,
-                next_channel,
-                next_channel,
-                next_block,
-                norm_cfg=norm_cfg)
-
-        self.low3 = ResLayer(
-            BasicBlock,
-            next_channel,
-            cur_channel,
-            cur_block,
-            norm_cfg=norm_cfg,
-            downsample_first=False)
-
-        self.up2 = nn.Upsample(scale_factor=2)
-
-    def forward(self, x):
-        """Forward function."""
-        up1 = self.up1(x)
-        low1 = self.low1(x)
-        low2 = self.low2(low1)
-        low3 = self.low3(low2)
-        up2 = self.up2(low3)
-        return up1 + up2
-
-
-@BACKBONES.register_module()
-class HourglassNet(nn.Module):
-    """HourglassNet backbone.
-
-    Stacked Hourglass Networks for Human Pose Estimation.
-    More details can be found in the `paper
-    <https://arxiv.org/abs/1603.06937>`_ .
-
-    Args:
-        downsample_times (int): Downsample times in a HourglassModule.
-        num_stacks (int): Number of HourglassModule modules stacked,
-            1 for Hourglass-52, 2 for Hourglass-104.
-        stage_channels (list[int]): Feature channel of each sub-module in a
-            HourglassModule.
-        stage_blocks (list[int]): Number of sub-modules stacked in a
-            HourglassModule.
-        feat_channel (int): Feature channel of conv after a HourglassModule.
-        norm_cfg (dict): Dictionary to construct and config norm layer.
-
-    Example:
-        >>> from mmdet.models import HourglassNet
-        >>> import torch
-        >>> self = HourglassNet()
-        >>> self.eval()
-        >>> inputs = torch.rand(1, 3, 511, 511)
-        >>> level_outputs = self.forward(inputs)
-        >>> for level_output in level_outputs:
-        ...     print(tuple(level_output.shape))
-        (1, 256, 128, 128)
-        (1, 256, 128, 128)
-    """
-
-    def __init__(self,
-                 downsample_times=5,
-                 num_stacks=2,
-                 stage_channels=(256, 256, 384, 384, 384, 512),
-                 stage_blocks=(2, 2, 2, 2, 2, 4),
-                 feat_channel=256,
-                 norm_cfg=dict(type='BN', requires_grad=True)):
-        super(HourglassNet, self).__init__()
-
-        self.num_stacks = num_stacks
-        assert self.num_stacks >= 1
-        assert len(stage_channels) == len(stage_blocks)
-        assert len(stage_channels) > downsample_times
-
-        cur_channel = stage_channels[0]
-
-        self.stem = nn.Sequential(
-            ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg),
-            ResLayer(BasicBlock, 128, 256, 1, stride=2, norm_cfg=norm_cfg))
-
-        self.hourglass_modules = nn.ModuleList([
-            HourglassModule(downsample_times, stage_channels, stage_blocks)
-            for _ in range(num_stacks)
-        ])
-
-        self.inters = ResLayer(
-            BasicBlock,
-            cur_channel,
-            cur_channel,
-            num_stacks - 1,
-            norm_cfg=norm_cfg)
-
-        self.conv1x1s = nn.ModuleList([
-            ConvModule(
-                cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
-            for _ in range(num_stacks - 1)
-        ])
-
-        self.out_convs = nn.ModuleList([
-            ConvModule(
-                cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
-            for _ in range(num_stacks)
-        ])
-
-        self.remap_convs = nn.ModuleList([
-            ConvModule(
-                feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
-            for _ in range(num_stacks - 1)
-        ])
-
-        self.relu = nn.ReLU(inplace=True)
-
-    def init_weights(self, pretrained=None):
-        """Init module weights.
-
-        We do nothing in this function because all modules we used
-        (ConvModule, BasicBlock and etc.) have default initialization, and
-        currently we don't provide pretrained model of HourglassNet.
-
-        Detector's __init__() will call backbone's init_weights() with
-        pretrained as input, so we keep this function.
-        """
-        # Training Centripetal Model needs to reset parameters for Conv2d
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                m.reset_parameters()
-
-    def forward(self, x):
-        """Forward function."""
-        inter_feat = self.stem(x)
-        out_feats = []
-
-        for ind in range(self.num_stacks):
-            single_hourglass = self.hourglass_modules[ind]
-            out_conv = self.out_convs[ind]
-
-            hourglass_feat = single_hourglass(inter_feat)
-            out_feat = out_conv(hourglass_feat)
-            out_feats.append(out_feat)
-
-            if ind < self.num_stacks - 1:
-                inter_feat = self.conv1x1s[ind](
-                    inter_feat) + self.remap_convs[ind](
-                        out_feat)
-                inter_feat = self.inters[ind](self.relu(inter_feat))
-
-        return out_feats
spaces/CVPR/WALT/walt/apis/__init__.py
DELETED
@@ -1,6 +0,0 @@
-from .train import get_root_logger, set_random_seed, train_detector
-
-
-__all__ = [
-    'get_root_logger', 'set_random_seed', 'train_detector'
-]
spaces/CVPR/WALT/walt/datasets/pipelines/test_time_aug.py
DELETED
@@ -1,119 +0,0 @@
-import warnings
-
-import mmcv
-
-from ..builder import PIPELINES
-from .compose import Compose
-
-
-@PIPELINES.register_module()
-class MultiScaleFlipAug(object):
-    """Test-time augmentation with multiple scales and flipping.
-
-    An example configuration is as followed:
-
-    .. code-block::
-
-        img_scale=[(1333, 400), (1333, 800)],
-        flip=True,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='Pad', size_divisor=32),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ]
-
-    After MultiScaleFLipAug with above configuration, the results are wrapped
-    into lists of the same length as followed:
-
-    .. code-block::
-
-        dict(
-            img=[...],
-            img_shape=[...],
-            scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
-            flip=[False, True, False, True]
-            ...
-        )
-
-    Args:
-        transforms (list[dict]): Transforms to apply in each augmentation.
-        img_scale (tuple | list[tuple] | None): Images scales for resizing.
-        scale_factor (float | list[float] | None): Scale factors for resizing.
-        flip (bool): Whether apply flip augmentation. Default: False.
-        flip_direction (str | list[str]): Flip augmentation directions,
-            options are "horizontal" and "vertical". If flip_direction is list,
-            multiple flip augmentations will be applied.
-            It has no effect when flip == False. Default: "horizontal".
-    """
-
-    def __init__(self,
-                 transforms,
-                 img_scale=None,
-                 scale_factor=None,
-                 flip=False,
-                 flip_direction='horizontal'):
-        self.transforms = Compose(transforms)
-        assert (img_scale is None) ^ (scale_factor is None), (
-            'Must have but only one variable can be setted')
-        if img_scale is not None:
-            self.img_scale = img_scale if isinstance(img_scale,
-                                                     list) else [img_scale]
-            self.scale_key = 'scale'
-            assert mmcv.is_list_of(self.img_scale, tuple)
-        else:
-            self.img_scale = scale_factor if isinstance(
-                scale_factor, list) else [scale_factor]
-            self.scale_key = 'scale_factor'
-
-        self.flip = flip
-        self.flip_direction = flip_direction if isinstance(
-            flip_direction, list) else [flip_direction]
-        assert mmcv.is_list_of(self.flip_direction, str)
-        if not self.flip and self.flip_direction != ['horizontal']:
-            warnings.warn(
-                'flip_direction has no effect when flip is set to False')
-        if (self.flip
-                and not any([t['type'] == 'RandomFlip' for t in transforms])):
-            warnings.warn(
-                'flip has no effect when RandomFlip is not in transforms')
-
-    def __call__(self, results):
-        """Call function to apply test time augment transforms on results.
-
-        Args:
-            results (dict): Result dict contains the data to transform.
-
-        Returns:
-            dict[str: list]: The augmented data, where each value is wrapped
-                into a list.
-        """
-
-        aug_data = []
-        flip_args = [(False, None)]
-        if self.flip:
-            flip_args += [(True, direction)
-                          for direction in self.flip_direction]
-        for scale in self.img_scale:
-            for flip, direction in flip_args:
-                _results = results.copy()
-                _results[self.scale_key] = scale
-                _results['flip'] = flip
-                _results['flip_direction'] = direction
-                data = self.transforms(_results)
-                aug_data.append(data)
-        # list of dict to dict of list
-        aug_data_dict = {key: [] for key in aug_data[0]}
-        for data in aug_data:
-            for key, val in data.items():
-                aug_data_dict[key].append(val)
-        return aug_data_dict
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(transforms={self.transforms}, '
-        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
-        repr_str += f'flip_direction={self.flip_direction})'
-        return repr_str
spaces/CVPR/v-doc_abstractive_mac/extract_feature.py
DELETED
@@ -1,51 +0,0 @@
-import argparse, os, json
-import numpy as np
-from imageio import imread
-from PIL import Image
-
-import torch
-import torchvision
-import ssl
-ssl._create_default_https_context = ssl._create_unverified_context
-
-
-def build_model(model='resnet101', model_stage=3):
-    cnn = getattr(torchvision.models, model)(pretrained=True)
-    layers = [
-        cnn.conv1,
-        cnn.bn1,
-        cnn.relu,
-        cnn.maxpool,
-    ]
-    for i in range(model_stage):
-        name = 'layer%d' % (i + 1)
-        layers.append(getattr(cnn, name))
-    model = torch.nn.Sequential(*layers)
-    # model.cuda()
-    model.eval()
-    return model
-
-
-def run_image(img, model):
-    mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
-    std = np.array([0.229, 0.224, 0.224]).reshape(1, 3, 1, 1)
-
-    image = np.concatenate([img], 0).astype(np.float32)
-    image = (image / 255.0 - mean) / std
-    image = torch.FloatTensor(image)
-    image = torch.autograd.Variable(image, volatile=True)
-
-    feats = model(image)
-    feats = feats.data.cpu().clone().numpy()
-
-    return feats
-
-
-def get_img_feat(cnn_model, img, image_height=224, image_width=224):
-    img_size = (image_height, image_width)
-    img = np.array(Image.fromarray(np.uint8(img)).resize(img_size))
-    img = img.transpose(2, 0, 1)[None]
-    feats = run_image(img, cnn_model)
-    _, C, H, W = feats.shape
-    feat_dset = feats.reshape(1, C, H, W)
-    return feat_dset
spaces/Cat125/text-generator-v3/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: Text Generator v3
-emoji: 🐨
-colorFrom: blue
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.27.0
-app_file: main.py
-pinned: true
-license: openrail
-duplicated_from: Cat125/text-generator-v2
----
-
-This tool allows you to generate texts based on given context.
spaces/CognitiveLabs/Research-Assistant/agent/llm_utils.py
DELETED
@@ -1,39 +0,0 @@
-from __future__ import annotations
-from config import Config
-import openai
-
-CFG = Config()
-
-openai.api_key = CFG.openai_api_key
-openai.api_base = CFG.openai_api_base
-
-from typing import Optional
-
-def llm_response(model,
-                 messages,
-                 temperature: float = CFG.temperature,
-                 max_tokens: Optional[int] = None):
-    return openai.ChatCompletion.create(
-        model=model,
-        messages=messages,
-        temperature=temperature,
-        max_tokens=max_tokens,
-    ).choices[0].message["content"]
-
-
-def llm_stream_response(model,
-                        messages,
-                        temperature: float = CFG.temperature,
-                        max_tokens: Optional[int] = None):
-    response = ""
-    for chunk in openai.ChatCompletion.create(
-        model=model,
-        messages=messages,
-        temperature=temperature,
-        max_tokens=max_tokens,
-        stream=True,
-    ):
-        content = chunk["choices"][0].get("delta", {}).get("content")
-        if content is not None:
-            response += content
-            yield response
spaces/Cong723/gpt-academic-public/crazy_functions/下载arxiv论文翻译摘要.py
DELETED
@@ -1,194 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file, get_conf
-import re, requests, unicodedata, os
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-def download_arxiv_(url_pdf):
-    if 'arxiv.org' not in url_pdf:
-        if ('.' in url_pdf) and ('/' not in url_pdf):
-            new_url = 'https://arxiv.org/abs/'+url_pdf
-            print('下载编号:', url_pdf, '自动定位:', new_url)
-            # download_arxiv_(new_url)
-            return download_arxiv_(new_url)
-        else:
-            print('不能识别的URL!')
-            return None
-    if 'abs' in url_pdf:
-        url_pdf = url_pdf.replace('abs', 'pdf')
-        url_pdf = url_pdf + '.pdf'
-
-    url_abs = url_pdf.replace('.pdf', '').replace('pdf', 'abs')
-    title, other_info = get_name(_url_=url_abs)
-
-    paper_id = title.split()[0]  # '[1712.00559]'
-    if '2' in other_info['year']:
-        title = other_info['year'] + ' ' + title
-
-    known_conf = ['NeurIPS', 'NIPS', 'Nature', 'Science', 'ICLR', 'AAAI']
-    for k in known_conf:
-        if k in other_info['comment']:
-            title = k + ' ' + title
-
-    download_dir = './gpt_log/arxiv/'
-    os.makedirs(download_dir, exist_ok=True)
-
-    title_str = title.replace('?', '?')\
-        .replace(':', ':')\
-        .replace('\"', '“')\
-        .replace('\n', '')\
-        .replace(' ', ' ')\
-        .replace(' ', ' ')
-
-    requests_pdf_url = url_pdf
-    file_path = download_dir+title_str
-    # if os.path.exists(file_path):
-    #     print('返回缓存文件')
-    #     return './gpt_log/arxiv/'+title_str
-
-    print('下载中')
-    proxies, = get_conf('proxies')
-    r = requests.get(requests_pdf_url, proxies=proxies)
-    with open(file_path, 'wb+') as f:
-        f.write(r.content)
-    print('下载完成')
-
-    # print('输出下载命令:','aria2c -o \"%s\" %s'%(title_str,url_pdf))
-    # subprocess.call('aria2c --all-proxy=\"172.18.116.150:11084\" -o \"%s\" %s'%(download_dir+title_str,url_pdf), shell=True)
-
-    x = "%s %s %s.bib" % (paper_id, other_info['year'], other_info['authors'])
-    x = x.replace('?', '?')\
-        .replace(':', ':')\
-        .replace('\"', '“')\
-        .replace('\n', '')\
-        .replace(' ', ' ')\
-        .replace(' ', ' ')
-    return './gpt_log/arxiv/'+title_str, other_info
-
-
-def get_name(_url_):
-    import os
-    from bs4 import BeautifulSoup
-    print('正在获取文献名!')
-    print(_url_)
-
-    # arxiv_recall = {}
-    # if os.path.exists('./arxiv_recall.pkl'):
-    #     with open('./arxiv_recall.pkl', 'rb') as f:
-    #         arxiv_recall = pickle.load(f)
-
-    # if _url_ in arxiv_recall:
-    #     print('在缓存中')
-    #     return arxiv_recall[_url_]
-
-    proxies, = get_conf('proxies')
-    res = requests.get(_url_, proxies=proxies)
-
-    bs = BeautifulSoup(res.text, 'html.parser')
-    other_details = {}
-
-    # get year
-    try:
-        year = bs.find_all(class_='dateline')[0].text
-        year = re.search(r'(\d{4})', year, re.M | re.I).group(1)
-        other_details['year'] = year
-        abstract = bs.find_all(class_='abstract mathjax')[0].text
-        other_details['abstract'] = abstract
-    except:
-        other_details['year'] = ''
-        print('年份获取失败')
-
-    # get author
-    try:
-        authors = bs.find_all(class_='authors')[0].text
-        authors = authors.split('Authors:')[1]
-        other_details['authors'] = authors
-    except:
-        other_details['authors'] = ''
-        print('authors获取失败')
-
-    # get comment
-    try:
-        comment = bs.find_all(class_='metatable')[0].text
-        real_comment = None
-        for item in comment.replace('\n', ' ').split(' '):
-            if 'Comments' in item:
-                real_comment = item
-        if real_comment is not None:
-            other_details['comment'] = real_comment
-        else:
-            other_details['comment'] = ''
-    except:
-        other_details['comment'] = ''
-        print('年份获取失败')
-
-    title_str = BeautifulSoup(
-        res.text, 'html.parser').find('title').contents[0]
-    print('获取成功:', title_str)
-    # arxiv_recall[_url_] = (title_str+'.pdf', other_details)
-    # with open('./arxiv_recall.pkl', 'wb') as f:
-    #     pickle.dump(arxiv_recall, f)
-
-    return title_str+'.pdf', other_details
-
-
-
-@CatchException
-def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-
-    CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……"
-    import glob
-    import os
-
-    # 基本信息:功能、贡献者
-    chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO])
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-    # 尝试导入依赖,如果缺少依赖,则给出安装建议
-    try:
-        import pdfminer, bs4
-    except:
-        report_execption(chatbot, history,
-                         a = f"解析项目: {txt}",
-                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-
-    # 清空历史,以免输入溢出
-    history = []
-
-    # 提取摘要,下载PDF文档
-    try:
-        pdf_path, info = download_arxiv_(txt)
-    except:
-        report_execption(chatbot, history,
-                         a = f"解析项目: {txt}",
-                         b = f"下载pdf文件未成功")
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-
-    # 翻译摘要等
-    i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}"
-    i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}'
-    chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-    msg = '正常'
-    # ** gpt request **
-    # 单线,获取文章meta信息
-    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-        inputs=i_say,
-        inputs_show_user=i_say_show_user,
-        llm_kwargs=llm_kwargs,
-        chatbot=chatbot, history=[],
-        sys_prompt="Your job is to collect information from materials and translate to Chinese。",
-    )
-
-    chatbot[-1] = (i_say_show_user, gpt_say)
-    history.append(i_say_show_user); history.append(gpt_say)
-    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-    # 写入文件
-    import shutil
-    # 重置文件的创建时间
-    shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path)
-    res = write_results_to_file(history)
-    chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载"))
-    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/streams/stapled.py
DELETED
@@ -1,140 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Any, Callable, Generic, Mapping, Sequence, TypeVar
-
-from ..abc import (
-    ByteReceiveStream,
-    ByteSendStream,
-    ByteStream,
-    Listener,
-    ObjectReceiveStream,
-    ObjectSendStream,
-    ObjectStream,
-    TaskGroup,
-)
-
-T_Item = TypeVar("T_Item")
-T_Stream = TypeVar("T_Stream")
-
-
-@dataclass(eq=False)
-class StapledByteStream(ByteStream):
-    """
-    Combines two byte streams into a single, bidirectional byte stream.
-
-    Extra attributes will be provided from both streams, with the receive stream providing the
-    values in case of a conflict.
-
-    :param ByteSendStream send_stream: the sending byte stream
-    :param ByteReceiveStream receive_stream: the receiving byte stream
-    """
-
-    send_stream: ByteSendStream
-    receive_stream: ByteReceiveStream
-
-    async def receive(self, max_bytes: int = 65536) -> bytes:
-        return await self.receive_stream.receive(max_bytes)
-
-    async def send(self, item: bytes) -> None:
-        await self.send_stream.send(item)
-
-    async def send_eof(self) -> None:
-        await self.send_stream.aclose()
-
-    async def aclose(self) -> None:
-        await self.send_stream.aclose()
-        await self.receive_stream.aclose()
-
-    @property
-    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
-        return {
-            **self.send_stream.extra_attributes,
-            **self.receive_stream.extra_attributes,
-        }
-
-
-@dataclass(eq=False)
-class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]):
-    """
-    Combines two object streams into a single, bidirectional object stream.
-
-    Extra attributes will be provided from both streams, with the receive stream providing the
-    values in case of a conflict.
-
-    :param ObjectSendStream send_stream: the sending object stream
-    :param ObjectReceiveStream receive_stream: the receiving object stream
-    """
-
-    send_stream: ObjectSendStream[T_Item]
-    receive_stream: ObjectReceiveStream[T_Item]
-
-    async def receive(self) -> T_Item:
-        return await self.receive_stream.receive()
-
-    async def send(self, item: T_Item) -> None:
-        await self.send_stream.send(item)
-
-    async def send_eof(self) -> None:
-        await self.send_stream.aclose()
-
-    async def aclose(self) -> None:
-        await self.send_stream.aclose()
-        await self.receive_stream.aclose()
-
-    @property
-    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
-        return {
-            **self.send_stream.extra_attributes,
-            **self.receive_stream.extra_attributes,
-        }
-
-
-@dataclass(eq=False)
-class MultiListener(Generic[T_Stream], Listener[T_Stream]):
-    """
-    Combines multiple listeners into one, serving connections from all of them at once.
-
-    Any MultiListeners in the given collection of listeners will have their listeners moved into
-    this one.
-
-    Extra attributes are provided from each listener, with each successive listener overriding any
-    conflicting attributes from the previous one.
-
-    :param listeners: listeners to serve
-    :type listeners: Sequence[Listener[T_Stream]]
-    """
-
-    listeners: Sequence[Listener[T_Stream]]
-
-    def __post_init__(self) -> None:
-        listeners: list[Listener[T_Stream]] = []
-        for listener in self.listeners:
-            if isinstance(listener, MultiListener):
-                listeners.extend(listener.listeners)
-                del listener.listeners[:]  # type: ignore[attr-defined]
-            else:
-                listeners.append(listener)
-
-        self.listeners = listeners
-
-    async def serve(
-        self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None
-    ) -> None:
-        from .. import create_task_group
-
-        async with create_task_group() as tg:
-            for listener in self.listeners:
-                tg.start_soon(listener.serve, handler, task_group)
-
-    async def aclose(self) -> None:
-        for listener in self.listeners:
-            await listener.aclose()
-
-    @property
-    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
-        attributes: dict = {}
-        for listener in self.listeners:
-            attributes.update(listener.extra_attributes)
-
-        return attributes
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_auth.py
DELETED
@@ -1,347 +0,0 @@
-import hashlib
-import netrc
-import os
-import re
-import time
-import typing
-from base64 import b64encode
-from urllib.request import parse_http_list
-
-from ._exceptions import ProtocolError
-from ._models import Request, Response
-from ._utils import to_bytes, to_str, unquote
-
-if typing.TYPE_CHECKING:  # pragma: no cover
-    from hashlib import _Hash
-
-
-class Auth:
-    """
-    Base class for all authentication schemes.
-
-    To implement a custom authentication scheme, subclass `Auth` and override
-    the `.auth_flow()` method.
-
-    If the authentication scheme does I/O such as disk access or network calls, or uses
-    synchronization primitives such as locks, you should override `.sync_auth_flow()`
-    and/or `.async_auth_flow()` instead of `.auth_flow()` to provide specialized
-    implementations that will be used by `Client` and `AsyncClient` respectively.
-    """
-
-    requires_request_body = False
-    requires_response_body = False
-
-    def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
-        """
-        Execute the authentication flow.
-
-        To dispatch a request, `yield` it:
-
-        ```
-        yield request
-        ```
-
-        The client will `.send()` the response back into the flow generator. You can
-        access it like so:
-
-        ```
-        response = yield request
-        ```
-
-        A `return` (or reaching the end of the generator) will result in the
-        client returning the last response obtained from the server.
-
-        You can dispatch as many requests as is necessary.
-        """
-        yield request
-
-    def sync_auth_flow(
-        self, request: Request
-    ) -> typing.Generator[Request, Response, None]:
-        """
-        Execute the authentication flow synchronously.
-
-        By default, this defers to `.auth_flow()`. You should override this method
-        when the authentication scheme does I/O and/or uses concurrency primitives.
-        """
-        if self.requires_request_body:
-            request.read()
-
-        flow = self.auth_flow(request)
-        request = next(flow)
-
-        while True:
-            response = yield request
-            if self.requires_response_body:
-                response.read()
-
-            try:
-                request = flow.send(response)
-            except StopIteration:
-                break
-
-    async def async_auth_flow(
-        self, request: Request
-    ) -> typing.AsyncGenerator[Request, Response]:
-        """
-        Execute the authentication flow asynchronously.
-
-        By default, this defers to `.auth_flow()`. You should override this method
-        when the authentication scheme does I/O and/or uses concurrency primitives.
-        """
-        if self.requires_request_body:
-            await request.aread()
-
-        flow = self.auth_flow(request)
-        request = next(flow)
-
-        while True:
-            response = yield request
-            if self.requires_response_body:
-                await response.aread()
-
-            try:
-                request = flow.send(response)
-            except StopIteration:
-                break
-
-
-class FunctionAuth(Auth):
-    """
-    Allows the 'auth' argument to be passed as a simple callable function,
-    that takes the request, and returns a new, modified request.
-    """
-
-    def __init__(self, func: typing.Callable[[Request], Request]) -> None:
-        self._func = func
-
-    def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
-        yield self._func(request)
-
-
-class BasicAuth(Auth):
-    """
-    Allows the 'auth' argument to be passed as a (username, password) pair,
-    and uses HTTP Basic authentication.
-    """
-
-    def __init__(
-        self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
-    ):
-        self._auth_header = self._build_auth_header(username, password)
-
-    def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
-        request.headers["Authorization"] = self._auth_header
-        yield request
-
-    def _build_auth_header(
-        self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
-    ) -> str:
-        userpass = b":".join((to_bytes(username), to_bytes(password)))
-        token = b64encode(userpass).decode()
-        return f"Basic {token}"
-
-
-class NetRCAuth(Auth):
-    """
-    Use a 'netrc' file to lookup basic auth credentials based on the url host.
-    """
-
-    def __init__(self, file: typing.Optional[str] = None):
-        self._netrc_info = netrc.netrc(file)
-
-    def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
-        auth_info = self._netrc_info.authenticators(request.url.host)
-        if auth_info is None or not auth_info[2]:
-            # The netrc file did not have authentication credentials for this host.
-            yield request
-        else:
-            # Build a basic auth header with credentials from the netrc file.
-            request.headers["Authorization"] = self._build_auth_header(
-                username=auth_info[0], password=auth_info[2]
-            )
-            yield request
-
-    def _build_auth_header(
-        self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
-    ) -> str:
-        userpass = b":".join((to_bytes(username), to_bytes(password)))
-        token = b64encode(userpass).decode()
-        return f"Basic {token}"
-
-
-class DigestAuth(Auth):
-    _ALGORITHM_TO_HASH_FUNCTION: typing.Dict[str, typing.Callable[[bytes], "_Hash"]] = {
-        "MD5": hashlib.md5,
-        "MD5-SESS": hashlib.md5,
-        "SHA": hashlib.sha1,
-        "SHA-SESS": hashlib.sha1,
-        "SHA-256": hashlib.sha256,
-        "SHA-256-SESS": hashlib.sha256,
-        "SHA-512": hashlib.sha512,
-        "SHA-512-SESS": hashlib.sha512,
-    }
-
-    def __init__(
-        self, username: typing.Union[str, bytes], password: typing.Union[str, bytes]
-    ) -> None:
-        self._username = to_bytes(username)
-        self._password = to_bytes(password)
-        self._last_challenge: typing.Optional[_DigestAuthChallenge] = None
-        self._nonce_count = 1
-
-    def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
-        if self._last_challenge:
-            request.headers["Authorization"] = self._build_auth_header(
-                request, self._last_challenge
-            )
-
-        response = yield request
-
-        if response.status_code != 401 or "www-authenticate" not in response.headers:
-            # If the response is not a 401 then we don't
-            # need to build an authenticated request.
-            return
-
-        for auth_header in response.headers.get_list("www-authenticate"):
-            if auth_header.lower().startswith("digest "):
-                break
-        else:
-            # If the response does not include a 'WWW-Authenticate: Digest ...'
-            # header, then we don't need to build an authenticated request.
-            return
-
-        self._last_challenge = self._parse_challenge(request, response, auth_header)
-        self._nonce_count = 1
-
-        request.headers["Authorization"] = self._build_auth_header(
-            request, self._last_challenge
-        )
-        yield request
-
-    def _parse_challenge(
-        self, request: Request, response: Response, auth_header: str
-    ) -> "_DigestAuthChallenge":
-        """
-        Returns a challenge from a Digest WWW-Authenticate header.
-        These take the form of:
-        `Digest realm="[email protected]",qop="auth,auth-int",nonce="abc",opaque="xyz"`
-        """
-        scheme, _, fields = auth_header.partition(" ")
-
-        # This method should only ever have been called with a Digest auth header.
-        assert scheme.lower() == "digest"
-
-        header_dict: typing.Dict[str, str] = {}
-        for field in parse_http_list(fields):
-            key, value = field.strip().split("=", 1)
-            header_dict[key] = unquote(value)
-
-        try:
-            realm = header_dict["realm"].encode()
-            nonce = header_dict["nonce"].encode()
-            algorithm = header_dict.get("algorithm", "MD5")
-            opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None
-            qop = header_dict["qop"].encode() if "qop" in header_dict else None
-            return _DigestAuthChallenge(
-                realm=realm, nonce=nonce, algorithm=algorithm, opaque=opaque, qop=qop
-            )
-        except KeyError as exc:
-            message = "Malformed Digest WWW-Authenticate header"
-            raise ProtocolError(message, request=request) from exc
-
-    def _build_auth_header(
-        self, request: Request, challenge: "_DigestAuthChallenge"
-    ) -> str:
-        hash_func = self._ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm.upper()]
-
-        def digest(data: bytes) -> bytes:
-            return hash_func(data).hexdigest().encode()
-
-        A1 = b":".join((self._username, challenge.realm, self._password))
-
-        path = request.url.raw_path
-        A2 = b":".join((request.method.encode(), path))
-        # TODO: implement auth-int
-        HA2 = digest(A2)
-
-        nc_value = b"%08x" % self._nonce_count
-        cnonce = self._get_client_nonce(self._nonce_count, challenge.nonce)
-        self._nonce_count += 1
-
-        HA1 = digest(A1)
-        if challenge.algorithm.lower().endswith("-sess"):
-            HA1 = digest(b":".join((HA1, challenge.nonce, cnonce)))
-
-        qop = self._resolve_qop(challenge.qop, request=request)
-        if qop is None:
-            digest_data = [HA1, challenge.nonce, HA2]
-        else:
-            digest_data = [challenge.nonce, nc_value, cnonce, qop, HA2]
-        key_digest = b":".join(digest_data)
-
-        format_args = {
-            "username": self._username,
-            "realm": challenge.realm,
-            "nonce": challenge.nonce,
-            "uri": path,
-            "response": digest(b":".join((HA1, key_digest))),
-            "algorithm": challenge.algorithm.encode(),
-        }
-        if challenge.opaque:
-            format_args["opaque"] = challenge.opaque
-        if qop:
-            format_args["qop"] = b"auth"
-            format_args["nc"] = nc_value
-            format_args["cnonce"] = cnonce
-
-        return "Digest " + self._get_header_value(format_args)
-
-    def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes:
-        s = str(nonce_count).encode()
-        s += nonce
-        s += time.ctime().encode()
-        s += os.urandom(8)
-
-        return hashlib.sha1(s).hexdigest()[:16].encode()
-
-    def _get_header_value(self, header_fields: typing.Dict[str, bytes]) -> str:
-        NON_QUOTED_FIELDS = ("algorithm", "qop", "nc")
-        QUOTED_TEMPLATE = '{}="{}"'
-        NON_QUOTED_TEMPLATE = "{}={}"
-
-        header_value = ""
-        for i, (field, value) in enumerate(header_fields.items()):
-            if i > 0:
-                header_value += ", "
-            template = (
-                QUOTED_TEMPLATE
-                if field not in NON_QUOTED_FIELDS
-                else NON_QUOTED_TEMPLATE
-            )
-            header_value += template.format(field, to_str(value))
-
-        return header_value
-
-    def _resolve_qop(
-        self, qop: typing.Optional[bytes], request: Request
-    ) -> typing.Optional[bytes]:
-        if qop is None:
-            return None
-        qops = re.split(b", ?", qop)
-        if b"auth" in qops:
-            return b"auth"
-
-        if qops == [b"auth-int"]:
-            raise NotImplementedError("Digest auth-int support is not yet implemented")
-
-        message = f'Unexpected qop value "{qop!r}" in digest auth'
-        raise ProtocolError(message, request=request)
-
-
-class _DigestAuthChallenge(typing.NamedTuple):
-    realm: bytes
-    nonce: bytes
-    algorithm: str
-    opaque: typing.Optional[bytes]
-    qop: typing.Optional[bytes]
spaces/Dantra1/CeliaSensei/attentions.py
DELETED
@@ -1,300 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import torch
|
3 |
-
from torch import nn
|
4 |
-
from torch.nn import functional as F
|
5 |
-
|
6 |
-
import commons
|
7 |
-
from modules import LayerNorm
|
8 |
-
|
9 |
-
|
10 |
-
class Encoder(nn.Module):
|
11 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
|
12 |
-
super().__init__()
|
13 |
-
self.hidden_channels = hidden_channels
|
14 |
-
self.filter_channels = filter_channels
|
15 |
-
self.n_heads = n_heads
|
16 |
-
self.n_layers = n_layers
|
17 |
-
self.kernel_size = kernel_size
|
18 |
-
self.p_dropout = p_dropout
|
19 |
-
self.window_size = window_size
|
20 |
-
|
21 |
-
self.drop = nn.Dropout(p_dropout)
|
22 |
-
self.attn_layers = nn.ModuleList()
|
23 |
-
self.norm_layers_1 = nn.ModuleList()
|
24 |
-
self.ffn_layers = nn.ModuleList()
|
25 |
-
self.norm_layers_2 = nn.ModuleList()
|
26 |
-
for i in range(self.n_layers):
|
27 |
-
self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
|
28 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
29 |
-
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
|
30 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
31 |
-
|
32 |
-
def forward(self, x, x_mask):
|
33 |
-
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
34 |
-
x = x * x_mask
|
35 |
-
for i in range(self.n_layers):
|
36 |
-
y = self.attn_layers[i](x, x, attn_mask)
|
37 |
-
y = self.drop(y)
|
38 |
-
x = self.norm_layers_1[i](x + y)
|
39 |
-
|
40 |
-
y = self.ffn_layers[i](x, x_mask)
|
41 |
-
y = self.drop(y)
|
42 |
-
x = self.norm_layers_2[i](x + y)
|
43 |
-
x = x * x_mask
|
44 |
-
return x
|
45 |
-
|
46 |
-
|
47 |
-
class Decoder(nn.Module):
|
48 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
|
49 |
-
super().__init__()
|
50 |
-
self.hidden_channels = hidden_channels
|
51 |
-
self.filter_channels = filter_channels
|
52 |
-
self.n_heads = n_heads
|
53 |
-
self.n_layers = n_layers
|
54 |
-
self.kernel_size = kernel_size
|
55 |
-
self.p_dropout = p_dropout
|
56 |
-
self.proximal_bias = proximal_bias
|
57 |
-
self.proximal_init = proximal_init
|
58 |
-
|
59 |
-
self.drop = nn.Dropout(p_dropout)
|
60 |
-
self.self_attn_layers = nn.ModuleList()
|
61 |
-
self.norm_layers_0 = nn.ModuleList()
|
62 |
-
self.encdec_attn_layers = nn.ModuleList()
|
63 |
-
self.norm_layers_1 = nn.ModuleList()
|
64 |
-
self.ffn_layers = nn.ModuleList()
|
65 |
-
self.norm_layers_2 = nn.ModuleList()
|
66 |
-
for i in range(self.n_layers):
|
67 |
-
self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
|
68 |
-
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
69 |
-
self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
|
70 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
71 |
-
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
|
72 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
73 |
-
|
74 |
-
def forward(self, x, x_mask, h, h_mask):
|
75 |
-
"""
|
76 |
-
x: decoder input
|
77 |
-
h: encoder output
|
78 |
-
"""
|
79 |
-
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
|
80 |
-
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
81 |
-
x = x * x_mask
|
82 |
-
for i in range(self.n_layers):
|
83 |
-
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
84 |
-
y = self.drop(y)
|
85 |
-
x = self.norm_layers_0[i](x + y)
|
86 |
-
|
87 |
-
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
88 |
-
y = self.drop(y)
|
89 |
-
x = self.norm_layers_1[i](x + y)
|
90 |
-
|
91 |
-
y = self.ffn_layers[i](x, x_mask)
|
92 |
-
y = self.drop(y)
|
93 |
-
x = self.norm_layers_2[i](x + y)
|
94 |
-
x = x * x_mask
|
95 |
-
return x
|
96 |
-
|
97 |
-
|
98 |
-
class MultiHeadAttention(nn.Module):
    def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels**-0.5
            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert t_s == t_t, "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert t_s == t_t, "Local attention is only available for self-attention."
                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
            output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so that the flat tensor reshapes to (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # Pad along the column dimension.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
        # Add 0's at the beginning that will skew the elements after reshape.
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
          length: an integer scalar.
        Returns:
          a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)

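# --- Illustrative sketch (not part of the original file) ---
# _relative_position_to_absolute_position above turns per-query relative logits
# [b, h, l, 2*l-1] into absolute-position scores [b, h, l, l] via a pad/reshape
# trick. The standalone snippet below reproduces the same steps with explicit
# pad tuples instead of commons.convert_pad_shape, so the shapes can be checked
# in isolation.
import torch
import torch.nn.functional as F

b, h, l = 1, 1, 3
x = torch.arange(b * h * l * (2 * l - 1), dtype=torch.float32).view(b, h, l, 2 * l - 1)

x_pad = F.pad(x, (0, 1))              # pad one column: [b, h, l, 2*l]
x_flat = x_pad.view(b, h, l * 2 * l)  # flatten the last two dims
x_flat = F.pad(x_flat, (0, l - 1))    # length becomes (l+1) * (2*l-1)
x_abs = x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]  # [b, h, l, l]

print(x_abs.shape)  # torch.Size([1, 1, 3, 3])
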
class FFN(nn.Module):
    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x
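# --- Illustrative sketch (not part of the original file) ---
# The causal and non-causal FFN above differ only in how the input is padded
# before each convolution: causal padding puts all kernel_size - 1 frames on
# the left (so frame t never sees future frames), while "same" padding splits
# them across both sides. A standalone version of the two pad computations,
# using plain F.pad instead of commons.convert_pad_shape:
import torch
import torch.nn.functional as F

kernel_size = 5
x = torch.randn(2, 16, 10)  # [batch, channels, time]

causal = F.pad(x, (kernel_size - 1, 0))                      # left-only padding
same = F.pad(x, ((kernel_size - 1) // 2, kernel_size // 2))  # symmetric padding

print(causal.shape, same.shape)  # both torch.Size([2, 16, 14])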
spaces/Dauzy/whisper-webui/src/whisper/fasterWhisperContainer.py
DELETED
@@ -1,207 +0,0 @@
import os
from typing import List, Union

from faster_whisper import WhisperModel, download_model
from src.config import ModelConfig, VadInitialPromptMode
from src.hooks.progressListener import ProgressListener
from src.languages import get_language_from_name
from src.modelCache import ModelCache
from src.prompts.abstractPromptStrategy import AbstractPromptStrategy
from src.whisper.abstractWhisperContainer import AbstractWhisperCallback, AbstractWhisperContainer
from src.utils import format_timestamp

class FasterWhisperContainer(AbstractWhisperContainer):
    def __init__(self, model_name: str, device: str = None, compute_type: str = "float16",
                 download_root: str = None,
                 cache: ModelCache = None, models: List[ModelConfig] = []):
        super().__init__(model_name, device, compute_type, download_root, cache, models)

    def ensure_downloaded(self):
        """
        Ensure that the model is downloaded. This is useful if you want to ensure that the model is downloaded before
        passing the container to a subprocess.
        """
        model_config = self._get_model_config()

        if os.path.isdir(model_config.url):
            model_config.path = model_config.url
        else:
            model_config.path = download_model(model_config.url, output_dir=self.download_root)

    def _get_model_config(self) -> ModelConfig:
        """
        Get the model configuration for the model.
        """
        for model in self.models:
            if model.name == self.model_name:
                return model
        return None

    def _create_model(self):
        print("Loading faster whisper model " + self.model_name + " for device " + str(self.device))
        model_config = self._get_model_config()
        model_url = model_config.url

        if model_config.type == "whisper":
            if model_url not in ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"]:
                raise Exception("FasterWhisperContainer does not yet support Whisper models. Use ct2-transformers-converter to convert the model to a faster-whisper model.")
            if model_url == "large":
                # "large" is an alias for "large-v1"
                model_url = "large-v1"

        device = self.device

        if device is None:
            device = "auto"

        model = WhisperModel(model_url, device=device, compute_type=self.compute_type)
        return model

    def create_callback(self, language: str = None, task: str = None,
                        prompt_strategy: AbstractPromptStrategy = None,
                        **decodeOptions: dict) -> AbstractWhisperCallback:
        """
        Create a WhisperCallback object that can be used to transcribe audio files.

        Parameters
        ----------
        language: str
            The target language of the transcription. If not specified, the language will be inferred from the audio content.
        task: str
            The task - either translate or transcribe.
        prompt_strategy: AbstractPromptStrategy
            The prompt strategy to use. If not specified, the prompt from Whisper will be used.
        decodeOptions: dict
            Additional options to pass to the decoder. Must be pickleable.

        Returns
        -------
        A WhisperCallback object.
        """
        return FasterWhisperCallback(self, language=language, task=task, prompt_strategy=prompt_strategy, **decodeOptions)

class FasterWhisperCallback(AbstractWhisperCallback):
    def __init__(self, model_container: FasterWhisperContainer, language: str = None, task: str = None,
                 prompt_strategy: AbstractPromptStrategy = None,
                 **decodeOptions: dict):
        self.model_container = model_container
        self.language = language
        self.task = task
        self.prompt_strategy = prompt_strategy
        self.decodeOptions = decodeOptions

        self._printed_warning = False

    def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None):
        """
        Perform the transcription of the given audio file or data.

        Parameters
        ----------
        audio: Union[str, np.ndarray, torch.Tensor]
            The audio file to transcribe, or the audio data as a numpy array or torch tensor.
        segment_index: int
            The index of the audio segment being transcribed.
        prompt: str
            The prompt to pass to the decoder for this segment.
        detected_language: str
            The language detected for the audio, used if no language was configured.
        progress_listener: ProgressListener
            A callback to receive progress updates.
        """
        model: WhisperModel = self.model_container.get_model()
        language_code = self._lookup_language_code(self.language) if self.language else None

        # Copy decode options and remove options that are not supported by faster-whisper
        decodeOptions = self.decodeOptions.copy()
        verbose = decodeOptions.pop("verbose", None)

        logprob_threshold = decodeOptions.pop("logprob_threshold", None)

        patience = decodeOptions.pop("patience", None)
        length_penalty = decodeOptions.pop("length_penalty", None)
        suppress_tokens = decodeOptions.pop("suppress_tokens", None)

        if decodeOptions.pop("fp16", None) is not None:
            if not self._printed_warning:
                print("WARNING: fp16 option is ignored by faster-whisper - use compute_type instead.")
                self._printed_warning = True

        # Fix up decode options
        if logprob_threshold is not None:
            decodeOptions["log_prob_threshold"] = logprob_threshold

        decodeOptions["patience"] = float(patience) if patience is not None else 1.0
        decodeOptions["length_penalty"] = float(length_penalty) if length_penalty is not None else 1.0

        # If suppress_tokens is a string, convert it to a list of ints
        decodeOptions["suppress_tokens"] = self._split_suppress_tokens(suppress_tokens)

        initial_prompt = self.prompt_strategy.get_segment_prompt(segment_index, prompt, detected_language) \
            if self.prompt_strategy else prompt

        segments_generator, info = model.transcribe(audio,
            language=language_code if language_code else detected_language, task=self.task,
            initial_prompt=initial_prompt,
            **decodeOptions
        )

        segments = []

        for segment in segments_generator:
            segments.append(segment)

            if progress_listener is not None:
                progress_listener.on_progress(segment.end, info.duration)
            if verbose:
                print("[{}->{}] {}".format(format_timestamp(segment.start, True), format_timestamp(segment.end, True),
                                           segment.text))

        text = " ".join([segment.text for segment in segments])

        # Convert the segments to a format that is easier to serialize
        whisper_segments = [{
            "text": segment.text,
            "start": segment.start,
            "end": segment.end,

            # Extra fields added by faster-whisper
            "words": [{
                "start": word.start,
                "end": word.end,
                "word": word.word,
                "probability": word.probability
            } for word in (segment.words if segment.words is not None else [])]
        } for segment in segments]

        result = {
            "segments": whisper_segments,
            "text": text,
            "language": info.language if info else None,

            # Extra fields added by faster-whisper
            "language_probability": info.language_probability if info else None,
            "duration": info.duration if info else None
        }

        # If we have a prompt strategy, we need to increment the current prompt
        if self.prompt_strategy:
            self.prompt_strategy.on_segment_finished(segment_index, prompt, detected_language, result)

        if progress_listener is not None:
            progress_listener.on_finished()
        return result

    def _split_suppress_tokens(self, suppress_tokens: Union[str, List[int]]):
        if suppress_tokens is None:
            return None
        if isinstance(suppress_tokens, list):
            return suppress_tokens

        return [int(token) for token in suppress_tokens.split(",")]

    def _lookup_language_code(self, language: str):
        language_entry = get_language_from_name(language)

        if language_entry is None:
            raise ValueError("Invalid language: " + language)

        return language_entry.code
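# --- Illustrative sketch (not part of the original file) ---
# The option handling in FasterWhisperCallback.invoke boils down to renaming and
# normalising a few whisper-style options before they are passed to
# faster-whisper. The helper below (a hypothetical name, for illustration only)
# reproduces that mapping so it can be inspected on its own.
from typing import List, Union

def fixup_decode_options(decode_options: dict) -> dict:
    options = decode_options.copy()
    options.pop("verbose", None)  # handled by the callback, not by faster-whisper
    options.pop("fp16", None)     # ignored by faster-whisper; compute_type is used instead

    # whisper's logprob_threshold is called log_prob_threshold in faster-whisper
    logprob_threshold = options.pop("logprob_threshold", None)
    if logprob_threshold is not None:
        options["log_prob_threshold"] = logprob_threshold

    # patience and length_penalty must be floats, defaulting to 1.0
    patience = options.pop("patience", None)
    length_penalty = options.pop("length_penalty", None)
    options["patience"] = float(patience) if patience is not None else 1.0
    options["length_penalty"] = float(length_penalty) if length_penalty is not None else 1.0

    # suppress_tokens may arrive as a comma-separated string
    suppress_tokens: Union[str, List[int], None] = options.pop("suppress_tokens", None)
    if isinstance(suppress_tokens, str):
        suppress_tokens = [int(token) for token in suppress_tokens.split(",")]
    options["suppress_tokens"] = suppress_tokens
    return options

print(fixup_decode_options({"logprob_threshold": -1.0, "suppress_tokens": "-1", "fp16": True}))
# {'log_prob_threshold': -1.0, 'patience': 1.0, 'length_penalty': 1.0, 'suppress_tokens': [-1]}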
spaces/Detomo/ai-comic-generation/src/app/ocr.tsx
DELETED
@@ -1,3 +0,0 @@
"use client"

import { createWorker } from "tesseract.js"