Commit · dd9da8f
1 Parent(s): 67d41b6
Update parquet files (step 32 of 249)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/101-5/gpt4free/g4f/Provider/Providers/H2o.py +0 -94
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Clash Of The Titans 1080p Bluray X264 The Best Way to Watch the 2010 Remake.md +0 -103
- spaces/1gistliPinn/ChatGPT4/Examples/Ab Bulk Mailer 8 5 License Ndb Decommissioning !!LINK!!.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Autodesk AutoCAD 2010- Keygens Only (X-FORCE 32-64bits) [RH] Download Pc.md +0 -24
- spaces/1gistliPinn/ChatGPT4/Examples/Bibleworks9__FULL__ Crack.md +0 -13
- spaces/1gistliPinn/ChatGPT4/Examples/Buku Pendidikan Pancasila Kaelan Pdf.md +0 -28
- spaces/1gistliPinn/ChatGPT4/Examples/Estudiodebellezagirltechsoftware REPACK.md +0 -22
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Army Reserve Serve Part-Time Earn Full-Time Benefits goarmy.com.md +0 -126
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bad 2 Bad Apocalypse Mod APK 1.2.4 - The Best Role Playing Game of 2023.md +0 -171
- spaces/1phancelerku/anime-remove-background/AirReceiver for Windows 10 The Best Way to Stream Music Video and Photos.md +0 -144
- spaces/1phancelerku/anime-remove-background/Como baixar o Roblox APK Mod e aproveitar ao mximo o seu celular.md +0 -93
- spaces/1phancelerku/anime-remove-background/Fate Grand Order Mod Apk Unlimited Quartz 2022.md +0 -127
- spaces/AIFILMS/StyleGANEX/utils/data_utils.py +0 -25
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/emotion/model.py +0 -78
- spaces/AILab-CVC/SEED-LLaMA/models/pipeline_stable_unclip_img2img.py +0 -794
- spaces/ASJMO/freegpt/server/bp.py +0 -6
- spaces/AUBADA-ALARABI/poetry2023/README.md +0 -13
- spaces/Abubakari/Sepsis-fastapi-prediction-app/main.py +0 -85
- spaces/AchyuthGamer/AchyuthGamer-OpenGPT/app.py +0 -3
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/+page.server.ts +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/grayscalepipeline-plugin.d.ts +0 -29
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/label/Factory.d.ts +0 -5
- spaces/AliUsama98/Aliusama_spellchecker/app.py +0 -3
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/score_sde_vp.md +0 -26
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/score_sde_ve/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py +0 -5
- spaces/Andy1621/uniformer_image_detection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py +0 -2
- spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/match_costs/match_cost.py +0 -184
- spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/bfp.py +0 -104
- spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py +0 -10
- spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/c_model.py +0 -194
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/version.py +0 -35
- spaces/Anonymous-sub/Rerender/ControlNet/cldm/cldm.py +0 -435
- spaces/Asahi402/anime-remove-background/app.py +0 -52
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/setuptools_build.py +0 -146
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/vcs/__init__.py +0 -15
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/workflows/levenshtein.js +0 -44
- spaces/BaiyuS/Real-CUGAN-YZ/app.py +0 -62
- spaces/Banbri/zcvzcv/src/app/ocr.tsx +0 -3
- spaces/BasToTheMax/voicechange/app.py +0 -18
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/engine/train_loop.py +0 -273
- spaces/CVPR/LIVE/thrust/thrust/detail/type_deduction.h +0 -74
- spaces/CVPR/LIVE/thrust/thrust/version.h +0 -83
- spaces/CVPR/WALT/configs/_base_/datasets/people_real_coco.py +0 -49
- spaces/ChrisCaviar/ControlNet-v1-1/preprocessor.py +0 -77
- spaces/ChrisPreston/diff-svc_minato_aqua/infer_tools/slicer.py +0 -142
- spaces/CikeyQI/meme-api/meme_generator/__init__.py +0 -21
- spaces/CikeyQI/meme-api/meme_generator/memes/dinosaur/__init__.py +0 -22
- spaces/CofAI/chat.b4/g4f/Provider/Providers/Better.py +0 -56
- spaces/CofAI/chat.b4/g4f/Provider/Providers/You.py +0 -24
spaces/101-5/gpt4free/g4f/Provider/Providers/H2o.py
DELETED
@@ -1,94 +0,0 @@
from requests import Session
from uuid import uuid4
from json import loads
import os
import json
import requests
from ...typing import sha256, Dict, get_type_hints

url = 'https://gpt-gm.h2o.ai'
model = ['falcon-40b', 'falcon-7b', 'llama-13b']
supports_stream = True
needs_auth = False

models = {
    'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
    'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
    'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
}

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    conversation = ''
    for message in messages:
        conversation += '%s: %s\n' % (message['role'], message['content'])

    conversation += 'assistant: '
    session = requests.Session()

    response = session.get("https://gpt-gm.h2o.ai/")
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/x-www-form-urlencoded",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
        "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
    }
    data = {
        "ethicsModalAccepted": "true",
        "shareConversationsWithModelAuthors": "true",
        "ethicsModalAcceptedAt": "",
        "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
        "searchEnabled": "true"
    }
    response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)



    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "*/*",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/json",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Referer": "https://gpt-gm.h2o.ai/"
    }
    data = {
        "model": models[model]
    }

    conversation_id = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)
    data = {
        "inputs": conversation,
        "parameters": {
            "temperature": kwargs.get('temperature', 0.4),
            "truncate": kwargs.get('truncate', 2048),
            "max_new_tokens": kwargs.get('max_new_tokens', 1024),
            "do_sample": kwargs.get('do_sample', True),
            "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
            "return_full_text": kwargs.get('return_full_text', False)
        },
        "stream": True,
        "options": {
            "id": kwargs.get('id', str(uuid4())),
            "response_id": kwargs.get('response_id', str(uuid4())),
            "is_retry": False,
            "use_cache": False,
            "web_search_id": ""
        }
    }

    response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id.json()['conversationId']}", headers=headers, json=data)
    generated_text = response.text.replace("\n", "").split("data:")
    generated_text = json.loads(generated_text[-1])

    return generated_text["generated_text"]

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
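For orientation while reading this diff: a g4f provider module like the one deleted above exposes module-level metadata (url, model, supports_stream, needs_auth) plus a _create_completion function that the framework dispatches to with a list of role/content message dicts. Below is a minimal usage sketch, assuming the module could still be imported at this revision as g4f.Provider.Providers.H2o (the import path mirrors the file path above; in practice g4f's dispatcher makes this call, and the hard-coded gpt-gm.h2o.ai endpoints may no longer respond):

from g4f.Provider.Providers import H2o  # assumed import; mirrors the deleted file's path

messages = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'Say hello.'},
]

# _create_completion flattens messages into a "role: content" transcript,
# accepts the site's ethics modal, opens a conversation on gpt-gm.h2o.ai,
# and parses the last "data:" chunk of the streamed reply into plain text.
reply = H2o._create_completion(model='falcon-7b', messages=messages, stream=False)
print(reply)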
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Clash Of The Titans 1080p Bluray X264 The Best Way to Watch the 2010 Remake.md
DELETED
@@ -1,103 +0,0 @@

<h1>Clash of the Titans 1080p Blu-ray x264: A Review</h1>
<p>If you are a fan of Greek mythology, action, adventure, and fantasy, you might have heard of <strong>Clash of the Titans</strong>, a 2010 movie that is a remake of a 1981 film of the same name. The movie follows Perseus, a demigod and son of Zeus, who embarks on a perilous journey to stop Hades, god of the underworld, from unleashing his wrath on Earth and Olympus. The movie stars Sam Worthington, Liam Neeson, Ralph Fiennes, Gemma Arterton, and Mads Mikkelsen, and is directed by Louis Leterrier. In this review, we will take a look at how this movie fares in 1080p Blu-ray x264 format, which is a high-quality video encoding that offers sharp images, vibrant colors, and clear sound.</p>
<h2>The Story</h2>
<p>The movie begins with a narration that explains how Zeus, Poseidon, and Hades defeated their father Cronus and his army of Titans, and divided the world among themselves. Zeus became king of Olympus and god of sky and thunder, Poseidon became god of sea and earthquakes, and Hades was tricked into ruling over the underworld. Zeus created humans to worship him and his fellow gods, but over time, humans became rebellious and defiant.</p>
<h2>Clash Of The Titans 1080p Bluray X264</h2><br /><p><b><b>Download File</b> ——— <a href="https://byltly.com/2uKyYf">https://byltly.com/2uKyYf</a></b></p><br /><br />
<p>One day, a group of soldiers from Argos destroy a statue of Zeus as a sign of their contempt for the gods. This angers Hades, who appears before them and unleashes his monstrous creatures called harpies. He also kills Perseus' adoptive family, who were fishing nearby. Perseus is rescued by soldiers and taken to Argos, where he learns that he is a demigod.</p>
<p>Hades then visits Olympus and convinces Zeus to let him punish humans for their insolence. He reveals his plan to unleash his most fearsome beast, the Kraken, on Argos unless they sacrifice their princess Andromeda. He also warns Zeus that Perseus is his son and a potential threat.</p>
<p>Perseus decides to join a group of warriors who volunteer to find a way to stop Hades and save Andromeda. He is accompanied by Io, a mysterious woman who has been watching over him since his birth. Along their journey, they encounter various dangers and wonders, such as giant scorpions, Medusa, Pegasus, Stygian witches, Calibos (a former king who was cursed by Zeus for killing his wife and son), and Djinn (desert sorcerers who have replaced their body parts with sand).</p>
<p>Clash Of The Titans full movie HD 1080p download<br />
Watch Clash Of The Titans online free bluray quality<br />
Clash Of The Titans 2010 1080p bluray torrent<br />
How to stream Clash Of The Titans in HD 1080p<br />
Clash Of The Titans bluray x264 subtitles<br />
Clash Of The Titans 1080p bluray rip<br />
Clash Of The Titans movie review HD 1080p<br />
Clash Of The Titans bluray x264 release date<br />
Clash Of The Titans 1080p bluray dual audio<br />
Clash Of The Titans HD 1080p trailer<br />
Clash Of The Titans bluray x264 extras<br />
Clash Of The Titans 1080p bluray yify<br />
Clash Of The Titans HD 1080p wallpapers<br />
Clash Of The Titans bluray x264 cast<br />
Clash Of The Titans 1080p bluray hindi dubbed<br />
Clash Of The Titans HD 1080p behind the scenes<br />
Clash Of The Titans bluray x264 director's cut<br />
Clash Of The Titans 1080p bluray box office<br />
Clash Of The Titans HD 1080p soundtrack<br />
Clash Of The Titans bluray x264 trivia<br />
Clash Of The Titans 1080p bluray comparison<br />
Clash Of The Titans HD 1080p quotes<br />
Clash Of The Titans bluray x264 awards<br />
Clash Of The Titans 1080p bluray special edition<br />
Clash Of The Titans HD 1080p remake<br />
Clash Of The Titans bluray x264 deleted scenes<br />
Clash Of The Titans 1080p bluray netflix<br />
Clash Of The Titans HD 1080p poster<br />
Clash Of The Titans bluray x264 rating<br />
Clash Of The Titans 1080p bluray amazon prime<br />
Clash Of The Titans HD 1080p sequel<br />
Clash Of The Titans bluray x264 budget<br />
Clash Of The Titans 1080p bluray imdb<br />
Clash Of The Titans HD 1080p easter eggs<br />
Clash Of The Titans bluray x264 runtime<br />
Clash Of The Titans 1080p bluray rotten tomatoes<br />
Clash Of The Titans HD 1080p plot summary<br />
Clash Of The Titans bluray x264 genre<br />
Clash Of The Titans 1080p bluray hulu<br />
Clash Of The Titans HD 1080p fan art<br />
Clash Of The Titans bluray x264 production company<br />
Clash Of The Titans 1080p bluray disney plus<br />
Clash Of The Titans HD 1080p cosplay<br />
Clash Of The Titans bluray x264 based on true story<br />
Clash Of The Titans 1080p bluray youtube video downloader</p>
<p>Perseus also learns more about his origins and his destiny. He discovers that he was conceived when Zeus disguised himself as Danae, Perseus' mother's husband. He also learns that he has a special weapon called Zeus' thunderbolt that can kill any god or monster. He also realizes that he has a choice between being a god or being a man.</p>
<p>The movie ends with an epic battle between Perseus and Hades at Argos. Perseus manages to defeat Hades with Zeus' thunderbolt and use Medusa's head to turn the Kraken into stone. He saves Andromeda from being sacrificed and declares his love for Io. He then returns Zeus' thunderbolt to Olympus and rejects his offer to join him as a god. He chooses to live as a man with Io by his side.</p>
<h2>The Visuals</h2>
<p>One of the main attractions of this movie is its stunning visuals that bring Greek mythology to life. The movie quality in 1080p Blu-ray x264 format is superb, as it offers crisp details, vivid colors, and smooth motion. The movie also features impressive special effects that create realistic and spectacular scenes.</p>
<p>Some of the visual highlights include:</p>
<ul>
<li>The opening scene where Zeus creates humans from clay.</li>
<li>The scene where Hades emerges from a cloud of black smoke and unleashes harpies on Argos.</li>
<li>The scene where Perseus fights giant scorpions in the desert.</li>
<li>The scene where Perseus enters Medusa's lair and faces her deadly gaze.</li>
<li>The scene where Perseus flies on Pegasus over Argos.</li>
<li>The scene where Perseus confronts Hades in front of the Kraken.</li>
</ul>
<p>The movie also has excellent cinematography that captures different angles and perspectives of the action. The editing is smooth and coherent, as it transitions between different scenes without losing focus or continuity.</p>
<h2>The Sound</h2>
<h2>The Sound</h2>
<p>The sound quality in 1080p Blu-ray x264 format is also outstanding, as it delivers clear dialogue, powerful sound effects, and immersive surround sound. The movie also features a captivating soundtrack that enhances the mood and the atmosphere of the movie.</p>
<p>Some of the sound highlights include:</p>
<ul>
<li>The song "The Storm That Brought Me To You" by Tina Dico, Neil Davidge, and Ramin Djawadi, which plays during the opening credits and sets the tone for the movie.</li>
<li>The sound of thunder and lightning that accompanies Zeus' presence and actions.</li>
<li>The sound of Hades' voice and his black smoke that creates a sense of dread and menace.</li>
<li>The sound of the Kraken's roar and its massive body that creates a sense of awe and terror.</li>
<li>The sound of Perseus' sword and shield clashing with his enemies' weapons and armor.</li>
<li>The sound of Medusa's hissing and her stone gaze that creates a sense of suspense and horror.</li>
</ul>
<p>The movie also has excellent voice acting that brings out the personality and the emotion of the characters. The dialogue is well-written and delivered, as it conveys the story and the themes of the movie.</p>
<h2>The Verdict</h2>
<p>Clash of the Titans is a movie that offers a thrilling and entertaining experience for fans of Greek mythology, action, adventure, and fantasy. The movie has a strong story that follows Perseus' journey from a fisherman to a hero. The movie has amazing visuals that showcase the beauty and the danger of the ancient world. The movie has superb sound that enhances the mood and the impact of the movie. The movie has a talented cast that brings out the best of their characters.</p>
<p>However, the movie also has some weaknesses that may affect its appeal for some viewers. The movie has some historical and mythological inaccuracies that may bother purists and scholars. The movie has some cheesy and clichéd moments that may reduce its credibility and originality. The movie has some weak character development and motivation that may reduce its depth and complexity.</p>
<p>Compared to other movies in the same genre or franchise, Clash of the Titans is a decent but not outstanding movie. It is better than its sequel Wrath of the Titans (2012), which was poorly received by critics and audiences. It is worse than its original Clash of the Titans (1981), which was a cult classic and a pioneer in stop-motion animation. It is similar to other movies based on Greek mythology, such as Immortals (2011), Hercules (2014), or 300 (2006), which have their own strengths and weaknesses.</p>
<p>Clash of the Titans is a movie that would appeal to viewers who enjoy action-packed, visually stunning, and mythologically inspired movies. It would not appeal to viewers who prefer realistic, accurate, or sophisticated movies.</p>
<h2>Conclusion</h2>
<p>In conclusion, Clash of the Titans is a movie that delivers a fun and exciting adventure in 1080p Blu-ray x264 format. The movie has a solid story, stunning visuals, superb sound, and a talented cast. The movie also has some flaws, such as historical and mythological inaccuracies, cheesy and clichéd moments, and weak character development. The movie is better than its sequel but worse than its original. The movie is similar to other movies based on Greek mythology. The movie would appeal to fans of Greek mythology, action, adventure, and fantasy.</p>
<p>I would give this movie a rating of 3.5 out of 5 stars. I would recommend this movie to anyone who likes epic movies with great effects and soundtracks.</p>
<h2>FAQs</h2>
<ol>
<li>Q: Who is Perseus in Greek mythology?<br>A: Perseus is one of the most famous heroes in Greek mythology. He is the son of Zeus and Danae, a mortal princess. He is best known for slaying Medusa, a gorgon who could turn anyone who looked at her into stone. He also rescued Andromeda from a sea monster sent by Poseidon.</li>
<li>Q: What is the Kraken in Greek mythology?<br>A: The Kraken is not actually part of Greek mythology but rather Norse mythology. It is a giant sea creature that resembles a squid or an octopus. It was said to attack ships and drag them down to the depths of the ocean. In Clash of the Titans, it is depicted as Hades' ultimate weapon against humans.</li>
<li>Q: What is x264?<br>A: x264 is a video encoding format that compresses video data into smaller files without losing much quality. It is widely used for high-definition video streaming and downloading. It can produce videos with resolutions up to 4K (4096x2160 pixels).</li>
<li>Q: What is Blu-ray?<br>A: Blu-ray is a digital optical disc format that can store large amounts of data, such as high-definition video and audio. It can hold up to 25 GB on a single-layer disc or 50 GB on a dual-layer disc. It can play videos with resolutions up to 1080p (1920x1080 pixels).</li>
<li>God of War (2018), which is a video game that follows the adventures of Kratos, a former Spartan warrior who becomes the god of war and travels to different mythological realms.</li>
<li>Wonder Woman (2017), which tells the story of Diana, an Amazon princess and daughter of Zeus, who joins forces with a World War I spy to stop a war god.</li>
</ul>
</li>
</ol>
</p> 0a6ba089eb<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/Ab Bulk Mailer 8 5 License Ndb Decommissioning !!LINK!!.md
DELETED
@@ -1,6 +0,0 @@
<h2>Ab Bulk Mailer 8 5 License Ndb Decommissioning</h2><br /><p><b><b>Download File</b> ⚹⚹⚹ <a href="https://imgfil.com/2uy04Q">https://imgfil.com/2uy04Q</a></b></p><br /><br />

com (freeshell) DOWNLOAD: (freeshell) $ a &&./8.5 $ cd www &&./8.5 /usr/bin/env mkdir -p www /usr/bin/env mkdir -p www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown -R www:www www /usr/bin/env chown 4fefd39f24<br />
<br />
<br />
<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Autodesk AutoCAD 2010- Keygens Only (X-FORCE 32-64bits) [RH] Download Pc.md
DELETED
@@ -1,24 +0,0 @@
<h2>Autodesk AutoCAD 2010- Keygens only (X-FORCE 32-64bits) [RH] download pc</h2><br /><p><b><b>DOWNLOAD</b> ✵ <a href="https://imgfil.com/2uy1Ue">https://imgfil.com/2uy1Ue</a></b></p><br /><br />

0 date september. their the bork is the. some of your hives. when i run the bit of code that they told me to enter. CAD Designer 2002 or later. 2012 Activation key. 29 May 2012 your key will activate the downloaded xforce 2012 but you have to download the activation key from here. Windows 8 64-bit 10/01/2012. I already have license and activation. CAD-AutoCAD-2007.. the application. 2012. my key doesnt work. But when i use the key from the vcd file its work well.

Keygen or Activation for the Balsamiq

The only way I know to activate it is if you use your keygen: file which is under your autocad 2010 folder. A user wants to remotely install the program from a remote PC using the Software Activation Wizard. You click, the user types his password, and the installation is complete. The problem? There is a hidden file in the executable file that indicates the license and activation information, and that file can only be opened by software that is a copy of the.... 2008.. 3) Unfortunately it is not possible to transfer the. keygen which is to activate the current license that is currently. - Activate. Create. I dont know which license did you get.

CAD InDesign Protractor.. Create, Extract, Check.. as digital CAD. license key.. is any.. is suitable for.. some CAD software.. The license can be activated.. xforce keygen or activate your.. InDesign Protractor needs a Microsoft.. license key.. online.. to get a different license key to. 2012 and it.. xforce.

Autodesk.. Microsoft Software.. License. The License.. to the original license.. expires on.. The Microsoft.. Activation key.. the program to get a new.

Autodesk to Excel - What is "Activation key"?

Xforce Activation Key For CAD

Xforce Keygen. Activation Key Generator. Installed the downloaded Autodesk software and I. XFORCE is a powerful program for designing and.

CAD Keygens-Autodesk-Xforce-2011-64bit

It is important to be careful with this kind of keygens. They generally work, but just like any other keygens, 4fefd39f24<br />
<br />
<br />
<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Bibleworks9__FULL__ Crack.md
DELETED
@@ -1,13 +0,0 @@
<h2>bibleworks9crack</h2><br /><p><b><b>Download Zip</b> ===> <a href="https://imgfil.com/2uxYiG">https://imgfil.com/2uxYiG</a></b></p><br /><br />
<br />
A brief demo of BibleWorks 9 running VirtualBox on a Mac (MacBook Pro, 2.4 GHz, Intel Core i5). I was able to create a .bgp file using bibleworks.
The file is created successfully and everything on the device works fine.
I'm trying to include a .bgp file on a GNOME (Linux mint 18) system.
I tried a few things that didn't work.
I also tried using bibleworks to change the .bgp file.
As a result, I was unable to create the bibleworks.
I tried everything I found on the internet.
So I decided to start a new project on a new Linux distribution. 8a78ff9644<br />
<br />
<br />
<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Buku Pendidikan Pancasila Kaelan Pdf.md
DELETED
@@ -1,28 +0,0 @@
<h2>Buku Pendidikan Pancasila Kaelan Pdf</h2><br /><p><b><b>DOWNLOAD</b> ››› <a href="https://imgfil.com/2uy1HR">https://imgfil.com/2uy1HR</a></b></p><br /><br />
<br />
a strong stock of ability, and the individual grows wisely at one time in a simple way, without intellectualization being present.

On the first time of essay thesis custom writing service writing, I had been worried about a little theses for me personally. Some problem in the course of your entire life and then you possibly will definitely survive without help from the skilled academic freelance writers.

Somebody that you can hire for each one of your work. Employing a thesis writing specialist to accomplish your thesis may be a fantastic concept since it will permit you to enjoy the time.

If you may not remember how to invest in a dissertation publishing service or just how to finish a thesis, our authorities are here to assist you.

Now is the day when you will be able to be relieved with your initial do the job. Just watch your purchase of class projects on our site, you'll notice how uncomplicated and quick it's to do this activity.

Working with an expert will be useful in every single place. You are able to trust our service simply because we also present a variety of other writing professional services, including research papers and essays. You can count on us to employ an essay author.

Our experts can be found on-line 24 hours a day, seven days a week. This makes them excellent for customers who are on the whole practical and would not want to invest their company time inside their product.

While you can find pretty much any course projects with our service, this area of the site is all about thesis creating.

In case you have to discover an excellent thesis writing service, then you can consider our company as a first-rate choice. We can easily supply thesis composing solutions on any essay topic and topic and guarantee a worthwhile function from the whole process. You may rest assured that our thesis publishing professional services are of high quality and won't demand much time.

Our company has created a terrific portfolio of clientele. Our purchasers have a wide selection of improvements to review about our working experience.

Our company offers you totally free topic suggestions, models, timeline and everything which you'll need to perform all over a specialized making. Our experts are accessible to assist you at any stage of the process and guarantee great results.

Thesis composing support is the wonderful, cost-effective strategy to handle your training course 4fefd39f24<br />
<br />
<br />
<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Estudiodebellezagirltechsoftware REPACK.md
DELETED
@@ -1,22 +0,0 @@
<h2>Estudiodebellezagirltechsoftware</h2><br /><p><b><b>Download File</b> –––––>>> <a href="https://imgfil.com/2uxXHo">https://imgfil.com/2uxXHo</a></b></p><br /><br />
<br />
Photo by Bobby Doherty.

Finding the perfect photo isn't always as easy as pointing the camera and firing away. In fact, learning how to shoot well takes practice. Fortunately, there's no shortage of great books out there that can teach you the art and craft of photography, especially for beginning photographers. These books will help you unlock your camera's full potential.

Getting started

Many photographers start out with the cameras they already have in their hands. Often, the camera they already own is the camera they had at age five. For beginning photographers, this can be a great choice. The learning curve for older digital cameras is very gentle, making it easy to get comfortable and feel like you're learning to shoot with a grandparent's camera. Some cameras, however, are not beginner-friendly and require some knowledge of how to operate a camera before you begin shooting.

A decent entry-level DSLR from one of the manufacturers that offer a good-value basic package can help get you started on the right foot and save you some money. In fact, some of the very best digital cameras for beginners can be found among cameras that are no more expensive than an entry-level DSLR (or more affordable than the aforementioned basic package).

The lens

The camera alone does not a great photographer make. It's how the camera is used — with the right lens — that makes a difference in the quality of the pictures you take. It's almost impossible to overlook the impact the lens has on your pictures; without a great lens, you can't take pictures that have any depth or true impact.

Although an entry-level DSLR will include the basic camera features, it will not include the wide variety of quality lenses available for advanced photography. An entry-level DSLR camera and lens package will cost significantly more than the individual components, but it's a good choice if you want to get your feet wet as a photographer and aren't in a hurry to upgrade.

A basic lens is one that is capable of shooting pictures in the basic photo formats: 4×5, 5×7, 8×10, 4×6 and 8×12. These are the most common formats for amateur and professional photographers alike. A basic lens will cost you between $75 and $150, and will cover you for about 90 to 100 pictures. This is a modest lens, but will help you to understand the basics of framing, composition and light. It will also help you to identify the proper 4fefd39f24<br />
<br />
<br />
<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Army Reserve Serve Part-Time Earn Full-Time Benefits goarmy.com.md
DELETED
@@ -1,126 +0,0 @@
<br />
<h1>What is the Army and Why is it Important?</h1>
<p>The army is a branch of the military that fights primarily on land. It is one of the oldest and most essential forms of organized armed forces in human history. The army provides security and defense for a nation or country, as well as supporting other national interests and objectives.</p>
<h2>army</h2><br /><p><b><b>Download Zip</b> ✯ <a href="https://urlin.us/2uSTIb">https://urlin.us/2uSTIb</a></b></p><br /><br />
<h2>Definition and history of the army</h2>
<p>According to the OxfordDictionaries, an army is "an organized military force equipped for fighting on land" or "the part of a country's military force trained to fight on land". The word "army" comes from the Latin word "arma", meaning "arms" or "weapons".</p>
<p>The first recorded armies date back to ancient times, when kings and rulers mobilized their subjects or hired mercenaries to wage wars against their enemies. Some of the earliest examples of armies are the Egyptian, Assyrian, Babylonian, Persian, Greek, Roman, Chinese, Indian, and Mongol armies. These armies developed various tactics, formations, weapons, and strategies to achieve victory on the battlefield.</p>
<p>Over time, armies evolved to adapt to changing technologies, environments, and challenges. Some of the major milestones in army history include the development of gunpowder, firearms, artillery, cavalry, infantry, tanks, aircraft, helicopters, missiles, nuclear weapons, drones, cyberwarfare, and special forces. These innovations have increased the range, speed, accuracy, mobility, firepower, stealth, and sophistication of army operations.</p>
<h2>Roles and missions of the army</h2>
<p>The army has a variety of roles and missions that depend on the needs and goals of a nation or country. Some of the common roles and missions of the army are:</p>
<ul>
<li>Deterrence: The army maintains a credible and capable force that can discourage potential adversaries from attacking or threatening a nation or its allies.</li>
<li>Defense: The army protects a nation's territory, sovereignty, and interests from external aggression or invasion.</li>
<li>Offense: The army conducts offensive operations against enemy forces or targets to achieve strategic or operational objectives.</li>
<li>Stability: The army supports peacekeeping, humanitarian assistance, disaster relief, counterinsurgency, counterterrorism, and nation-building efforts to promote stability and security in regions of conflict or crisis.</li>
<li>Cooperation: The army works with other branches of the military, other nations' militaries, international organizations, and non-governmental organizations to enhance interoperability, coordination, and collaboration.</li>
</ul>
<h2>Benefits and challenges of serving in the army</h2>
<p>Serving in the army can be rewarding and challenging at the same time. Some of the benefits of serving in the army are:</p>
<p>army surplus store near me<br />
army of the dead release date<br />
army national guard pay chart<br />
army wives cast where are they now<br />
army basic training packing list<br />
army of darkness full movie<br />
army green nail polish<br />
army corps of engineers campgrounds<br />
army men rts download<br />
army of two the devil's cartel<br />
army navy game 2022 tickets<br />
army reserve retirement calculator<br />
army ant bite treatment<br />
army birthday party ideas<br />
army combat fitness test scorecard<br />
army dress blues uniform guide<br />
army email login from home<br />
army field manual 3-0<br />
army general order 1<br />
army height and weight standards<br />
army intelligence mos list<br />
army jag corps application<br />
army knowledge online help desk<br />
army leadership principles acronym<br />
army medical waiver process<br />
army navy surplus store online<br />
army officer candidate school requirements<br />
army physical fitness test standards<br />
army quartermaster branch insignia<br />
army ranger school packing list<br />
army regulation for awards and decorations<br />
army sharp training answers<br />
army tattoo policy 2022<br />
army unit patches and their meanings<br />
army values acronym loyalty duty respect selfless service honor integrity personal courage<br />
army warrant officer flight training program<br />
army yellow ribbon program guidelines<br />
how to join the french foreign legion as an american<br />
how to make an origami ninja star with one paper<br />
how to play risk board game with 2 players<br />
how to write a letter of recommendation for west point<br />
is the salvation army a good charity to donate to?<br />
what are the benefits of joining the coast guard reserve?<br />
what are the qualifications for the purple heart medal?<br />
what does a green beret do in the army?<br />
what does it mean to be a conscientious objector in the military?<br />
what is the difference between active duty and reserve in the military?<br />
what is the highest rank in the us army?</p>
<ul>
<li>Patriotism: Serving in the army can instill a sense of pride and loyalty to one's nation and its values.</li>
<li>Service: Serving in the army can provide an opportunity to contribute to a greater cause and make a positive difference in the world.</li>
<li>Career: Serving in the army can offer a variety of career options and opportunities for personal and professional growth.</li>
<li>Education: Serving in the army can provide access to training and education benefits that can enhance one's skills and knowledge.</li>
<li>Compensation: Serving in the army can provide competitive pay and benefits that can support one's financial well-being.</li>
</ul>
<p>Some of the challenges of serving in the army are:</p>
<ul>
<li>Risk: Serving in the army can expose one to physical danger, injury, illness, death, or psychological trauma.</li>
<h1>How to Join the Army and What to Expect</h1>
<p>If you are interested in joining the army, you need to meet certain eligibility and requirements, undergo training and education, and choose a career path that suits your skills and interests.</p>
<h2>Eligibility and requirements for enlistment or commissioning</h2>
<p>To join the army, you must be a U.S. citizen or a permanent resident, be at least 17 years old (with parental consent) and not older than 34 years old, have a high school diploma or equivalent, pass a physical and medical exam, and pass a background check. You must also take the Armed Services Vocational Aptitude Battery (ASVAB) test to determine your aptitude for various military occupations.</p>
<p>You can join the army as an enlisted soldier or an officer. Enlisted soldiers are the backbone of the army, performing various duties and tasks in different specialties. Officers are the leaders of the army, planning and directing operations and overseeing enlisted soldiers. To become an officer, you need to have a bachelor's degree or higher, complete an officer training program, and earn a commission.</p>
<h2>Training and education opportunities in the army</h2>
<p>Once you join the army, you will undergo basic training, also known as boot camp, where you will learn the basic skills and values of being a soldier. Basic training lasts about 10 weeks and consists of physical fitness, weapons training, drill and ceremony, first aid, land navigation, and survival skills.</p>
<p>After basic training, you will attend advanced individual training (AIT), where you will learn the specific skills and knowledge of your chosen military occupation. AIT can last from a few weeks to a few months, depending on your specialty. Some examples of army specialties are infantry, artillery, engineer, signal, intelligence, medical, aviation, logistics, and cyber.</p>
<p>The army also offers various education opportunities for soldiers who want to further their academic or professional development. Some of these opportunities are tuition assistance, scholarships, grants, loans, college credits, certifications, apprenticeships, and online courses. The army also has its own educational institutions, such as the U.S. Military Academy at West Point, the U.S. Army War College, and the U.S. Army Command and General Staff College.</p>
<h2>Career paths and advancement in the army</h2>
<p>The army has a diverse and dynamic career system that allows soldiers to explore different options and opportunities throughout their service. Soldiers can change their specialties, apply for special programs or assignments, or pursue leadership positions as they progress in their careers.</p>
<p>The army uses a rank structure to indicate the level of responsibility and authority of each soldier. There are two types of ranks: enlisted ranks and officer ranks. Enlisted ranks range from private (E-1) to sergeant major of the army (E-9). Officer ranks range from second lieutenant (O-1) to general (O-10). Soldiers can advance in rank by meeting certain criteria, such as time in service, time in grade, performance evaluation, education, and promotion board.</p>
<h1>Conclusion</h1>
<p>The army is a vital part of the military that performs various roles and missions to protect and serve the nation. Joining the army can be a rewarding and challenging experience that requires meeting certain eligibility and requirements, undergoing training and education, and choosing a career path that suits your skills and interests.</p>
<h2>FAQs</h2>
<ul>
<li><b>What is the difference between the army and the other branches of the military?</b><br>The army is one of the five branches of the U.S. military, along with the navy, air force, marine corps, and coast guard. Each branch has its own unique mission, culture, and organization. The army focuses on land warfare, the navy on sea warfare, the air force on air warfare, the marine corps on amphibious warfare, and the coast guard on maritime law enforcement and rescue operations.</li>
<li><b>How long do I have to serve in the army?</b><br>The length of your service in the army depends on your contract and your status. Typically, you can choose to serve for two, three, four, or six years as an active duty soldier or for six years as a reserve soldier. Active duty soldiers serve full-time and can be deployed anywhere in the world. Reserve soldiers serve part-time and train one weekend per month and two weeks per year. They can also be activated to support active duty missions when needed.</li>
<li><b>What are some of the benefits of being a soldier in the army?</b><br>Some of the benefits of being a soldier in the army are: <ul>
<li>Health care: Soldiers and their families receive free or low-cost medical and dental care.</li>
<li>Housing: Soldiers can live on base or receive a housing allowance to live off base.</li>
<li>Food: Soldiers can eat for free or receive a food allowance to buy their own food.</li>
<li>Clothing: Soldiers receive a uniform allowance to purchase and maintain their uniforms.</li>
<li>Travel: Soldiers can travel to different places for training, assignments, or vacations.</li>
<li>Retirement: Soldiers can retire after 20 years of service and receive a monthly pension and other benefits.</li>
<li>Veterans benefits: Soldiers can access various benefits and services after they leave the army, such as education, employment, disability, home loans, and counseling.</li>
</ul>
</li>
<li><b>What are some of the challenges of being a soldier in the army?</b><br>Some of the challenges of being a soldier in the army are: <ul>
<li>Discipline: Soldiers must follow strict rules and regulations, such as wearing uniforms, saluting officers, and obeying orders.</li>
<li>Stress: Soldiers must cope with high-pressure situations, such as combat, training, deadlines, and inspections.</li>
<li>Lifestyle: Soldiers must adapt to frequent changes in their schedule, location, environment, and culture.</li>
<li>Family: Soldiers must balance their personal and professional lives, especially when they are away from their loved ones for long periods of time.</li>
<li>Morale: Soldiers must maintain a positive attitude and outlook, despite facing hardships, challenges, and sacrifices.</li>
</ul>
</li>
<li><b>What are some of the skills and qualities that a soldier needs to succeed in the army?</b><br>Some of the skills and qualities that a soldier needs to succeed in the army are: <ul>
<li>Physical fitness: Soldiers need to be fit and healthy to perform their duties and tasks effectively.</li>
<li>Mental toughness: Soldiers need to be resilient and determined to overcome obstacles and difficulties.</li>
<li>Teamwork: Soldiers need to work well with others and cooperate with their peers, leaders, and partners.</li>
<li>Leadership: Soldiers need to lead by example and inspire others to follow them.</li>
<li>Communication: Soldiers need to communicate clearly and effectively with different audiences and mediums.</li>
<li>Critical thinking: Soldiers need to think logically and creatively to solve problems and make decisions.</li>
<li>Lifelong learning: Soldiers need to keep learning new skills and knowledge to stay relevant and competitive in their careers.</li>
</ul>
</li>
<p>I hope you enjoyed reading this article about the army. If you have any questions or feedback, please let me know. </p> 197e85843d<br />
<br />
<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bad 2 Bad Apocalypse Mod APK 1.2.4 - The Best Role Playing Game of 2023.md
DELETED
@@ -1,171 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Bad 2 Bad: Apocalypse Mod APK 1.2.4 - A Fun and Action-Packed Role-Playing Game</h1>
|
3 |
-
<p>If you are looking for a fun and action-packed role-playing game that will keep you entertained for hours, then you should try <strong>Bad 2 Bad: Apocalypse Mod APK 1.2.4</strong>. This is a modified version of the original game that gives you unlimited money, unlocked characters, and more. In this article, we will tell you everything you need to know about this game, including its features, its benefits, its drawbacks, and how to download and install it on your device.</p>
|
4 |
-
<h2>bad 2 bad apocalypse mod apk 1.2.4</h2><br /><p><b><b>Download File</b> ✸ <a href="https://urlin.us/2uT3hz">https://urlin.us/2uT3hz</a></b></p><br /><br />
|
5 |
-
<h2>What is Bad 2 Bad: Apocalypse?</h2>
|
6 |
-
<p><strong>Bad 2 Bad: Apocalypse</strong> is a role-playing game developed by <em>Dawinstone</em>, a South Korean game studio. It is the sequel to <em>Bad 2 Bad: Delta</em>, which was released in 2018. The game follows the adventures of a team of animal soldiers who fight against the evil forces of <em>Tarantula</em>, a terrorist organization that wants to destroy the world.</p>
|
7 |
-
<h3>The story and the gameplay of Bad 2 Bad: Apocalypse</h3>
|
8 |
-
<p>The game has a rich and immersive story that will keep you hooked from the start. You will play as <em>Chief</em>, the leader of the <em>Delta Team</em>, who has to recruit new members and lead them into various missions across different regions. You will encounter different enemies, such as zombies, mutants, robots, and human soldiers, as well as boss battles that will test your skills and strategy.</p>
|
9 |
-
<p>The gameplay of <strong>Bad 2 Bad: Apocalypse</strong> is simple and intuitive. You will control your character with a virtual joystick on the left side of the screen, and use buttons on the right side to shoot, reload, switch weapons, and use special skills. You can also customize your character's appearance, equipment, and skills with the money you earn from completing missions. The game has over 100 missions to complete, as well as an endless mode where you can challenge yourself and see how long you can survive.</p>
|
10 |
-
<h3>The features and the graphics of Bad 2 Bad: Apocalypse</h3>
|
11 |
-
<p><strong>Bad 2 Bad: Apocalypse</strong> has many features that make it stand out from other role-playing games. Some of these features are:</p>
|
12 |
-
<ul>
|
13 |
-
<li><strong>A wide variety of characters:</strong> You can choose from over 20 different animal characters, each with their own personality, voice, and skills. You can also unlock more characters as you progress through the game.</li>
|
14 |
-
<li><strong>A diverse range of weapons:</strong> You can equip your character with over 60 different weapons, such as pistols, rifles, shotguns, snipers, rocket launchers, grenades, and more. You can also upgrade your weapons to make them more powerful and effective.</li>
|
15 |
-
<li><strong>A stunning graphics:</strong> The game has a beautiful and colorful graphics that will impress you with its details and animations. The game also has a smooth performance that will run smoothly on most devices.</li <h2>What is Bad 2 Bad: Apocalypse Mod APK 1.2.4?</h2>
|
16 |
-
<p><strong>Bad 2 Bad: Apocalypse Mod APK 1.2.4</strong> is a modified version of the original game that gives you some advantages and extra features that are not available in the official version. These include:</p>
|
17 |
-
<p>bad 2 bad apocalypse unlimited money mod apk<br />
|
18 |
-
bad 2 bad apocalypse hack apk download<br />
|
19 |
-
bad 2 bad apocalypse mod apk latest version<br />
|
20 |
-
bad 2 bad apocalypse mod apk android 1<br />
|
21 |
-
bad 2 bad apocalypse mod apk revdl<br />
|
22 |
-
bad 2 bad apocalypse mod apk happymod<br />
|
23 |
-
bad 2 bad apocalypse mod apk free download<br />
|
24 |
-
bad 2 bad apocalypse mod apk offline<br />
|
25 |
-
bad 2 bad apocalypse mod apk no root<br />
|
26 |
-
bad 2 bad apocalypse mod apk unlimited health<br />
|
27 |
-
bad 2 bad apocalypse mod apk unlimited gems<br />
|
28 |
-
bad 2 bad apocalypse mod apk unlimited ammo<br />
|
29 |
-
bad 2 bad apocalypse mod apk unlimited coins<br />
|
30 |
-
bad 2 bad apocalypse mod apk unlimited diamonds<br />
|
31 |
-
bad 2 bad apocalypse mod apk unlimited gold<br />
|
32 |
-
bad 2 bad apocalypse mod apk unlocked everything<br />
|
33 |
-
bad 2 bad apocalypse mod apk all characters unlocked<br />
|
34 |
-
bad 2 bad apocalypse mod apk all weapons unlocked<br />
|
35 |
-
bad 2 bad apocalypse mod apk all levels unlocked<br />
|
36 |
-
bad 2 bad apocalypse mod apk all items unlocked<br />
|
37 |
-
bad 2 bad apocalypse mod apk premium features<br />
|
38 |
-
bad 2 bad apocalypse mod apk vip features<br />
|
39 |
-
bad 2 bad apocalypse mod apk pro features<br />
|
40 |
-
bad 2 bad apocalypse mod apk full version<br />
|
41 |
-
bad 2 bad apocalypse mod apk mega mod<br />
|
42 |
-
how to install bad 2 bad apocalypse mod apk<br />
|
43 |
-
how to download bad 2 bad apocalypse mod apk<br />
|
44 |
-
how to play bad 2 bad apocalypse mod apk<br />
|
45 |
-
how to update bad 2 bad apocalypse mod apk<br />
|
46 |
-
how to get rid of ads in bad 2 bad apocalypse mod apk<br />
|
47 |
-
how to get more money in bad 2 bad apocalypse mod apk<br />
|
48 |
-
how to get more gems in bad 2 bad apocalypse mod apk<br />
|
49 |
-
how to get more ammo in bad 2 bad apocalypse mod apk<br />
|
50 |
-
how to get more health in bad 2 bad apocalypse mod apk<br />
|
51 |
-
how to unlock all characters in bad 2 bad apocalypse mod apk<br />
|
52 |
-
how to unlock all weapons in bad 2 bad apocalypse mod apk<br />
|
53 |
-
how to unlock all levels in bad 2 bad apocalypse mod apk<br />
|
54 |
-
how to unlock all items in bad 2 bad apocalypse mod apk<br />
|
55 |
-
how to use premium features in bad 2 big apocalpyse mode appk <br />
|
56 |
-
how to use vip features in badd too badd apocalpyse mode appk</p>
|
57 |
-
<ul>
<li><strong>Unlimited money:</strong> You will have unlimited money to buy and upgrade your weapons, equipment, and skills. You will not have to worry about running out of money or grinding for more.</li>
<li><strong>Unlocked characters:</strong> You will have access to all the characters in the game, even the ones that are normally locked or require real money to unlock. You can choose any character you want and enjoy their unique abilities and skills.</li>
<li><strong>No ads:</strong> You will not see any annoying ads that interrupt your gameplay or slow down your device. You will have a smooth and uninterrupted gaming experience.</li>
</ul>
<h3>The benefits of using Bad 2 Bad: Apocalypse Mod APK 1.2.4</h3>
<p>There are many benefits of using <strong>Bad 2 Bad: Apocalypse Mod APK 1.2.4</strong>. Some of these are:</p>
<ul>
<li><strong>More fun and enjoyment:</strong> You will have more fun and enjoyment playing the game with the mod features. You will be able to customize your character, equip your weapons, and use your skills without any limitations or restrictions. You will also be able to complete the missions faster and easier, and challenge yourself in the endless mode.</li>
<li><strong>More convenience and flexibility:</strong> You will have more convenience and flexibility playing the game with the mod features. You will not have to spend real money or waste time to unlock the characters or the weapons. You will also not have to deal with the ads that can ruin your mood or your device.</li>
<li><strong>More satisfaction and accomplishment:</strong> You will have more satisfaction and accomplishment playing the game with the mod features. You will be able to achieve your goals and objectives in the game, as well as earn more rewards and achievements. You will also be able to show off your skills and your progress to your friends and other players.</li>
</ul>
<h3>The drawbacks of using Bad 2 Bad: Apocalypse Mod APK 1.2.4</h3>
<p>However, there are also some drawbacks of using <strong>Bad 2 Bad: Apocalypse Mod APK 1.2.4</strong>. Some of these are:</p>
<ul>
<li><strong>Potential risks and dangers:</strong> You may face some potential risks and dangers when you download and install the mod apk file from an unknown or untrusted source. You may expose your device to viruses, malware, spyware, or other harmful software that can damage your device or steal your personal information. You may also violate the terms and conditions of the original game developer, which can result in legal actions or penalties.</li>
<li><strong>Lack of updates and support:</strong> You may not receive any updates or support from the original game developer when you use the mod apk file. You may miss out on new features, improvements, bug fixes, or security patches that are released by the official version. You may also encounter some compatibility issues or errors when you play the game with the mod apk file.</li>
<li><strong>Lack of challenge and balance:</strong> You may lose some challenge and balance when you play the game with the mod features. You may find the game too easy or boring, as you can breeze through the missions and enemies without any difficulty or effort. You may also lose some sense of achievement or reward, as you can get everything you want without any work or cost.</li>
</ul>
<h3>How to download and install Bad 2 Bad: Apocalypse Mod APK 1.2.4</h3>
<p>If you want to download and install <strong>Bad 2 Bad: Apocalypse Mod APK 1.2.4</strong>, you need to follow these steps:</p>
<h4>Step 1: Enable unknown sources on your device</h4>
<p>You need to enable unknown sources on your device to allow it to install apps from sources other than the Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and turn it on.</p>
<h4>Step 2: Download the mod apk file from a reliable source</h4>
<p>You need to download the mod apk file from a reliable source that offers a safe and secure download link. You can search for such sources on the internet, or use one of these links:</p>
<ul>
<li>[Download link 1]</li>
<li>[Download link 2]</li>
<li>[Download link 3]</li>
</ul>
<p>Make sure you download the mod apk file that matches the version of the game you want to play, which is 1.2.4 in this case.</p>
<h4>Step 3: Locate and install the mod apk file on your device</h4>
<p>After you download the mod apk file, you need to locate and install it on your device. To do this, go to your device's file manager, then find the folder where you saved the mod apk file, then tap on it to start the installation process. You may need to grant some permissions to allow the installation to proceed.</p>
<h4>Step 4: Launch the game and enjoy the mod features</h4>
<p>Once the installation is complete, you can launch the game and enjoy the mod features. You will see that you have unlimited money, unlocked characters, and no ads in the game. You can now play the game as you wish and have fun.</p>
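<p>For readers curious what actually happens in Step 3 when you tap the file: Android hands the APK to its system package installer. Below is a minimal Kotlin sketch of that hand-off, written the way a generic file manager might implement it; the FileProvider authority is a hypothetical placeholder that a real app would declare in its manifest.</p>

```kotlin
import android.content.Context
import android.content.Intent
import androidx.core.content.FileProvider
import java.io.File

// Hand a downloaded APK to the system package installer, the same thing
// that happens when you tap the file in a file manager.
fun launchPackageInstaller(context: Context, apk: File) {
    // Expose the file through a content URI; the authority is a placeholder
    // that a real app must declare in its AndroidManifest.xml.
    val uri = FileProvider.getUriForFile(
        context, "${context.packageName}.fileprovider", apk
    )
    val install = Intent(Intent.ACTION_VIEW).apply {
        setDataAndType(uri, "application/vnd.android.package-archive")
        addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION) // let the installer read the URI
        addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
    }
    context.startActivity(install)
}
```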
<h2>Conclusion</h2>
<p><strong>Bad 2 Bad: Apocalypse Mod APK 1.2.4</strong> is a fun and action-packed role-playing game that will keep you entertained for hours. You will love the story, the gameplay, the features, and the graphics of the game. You will also appreciate the mod features that give you more advantages and extra features in the game. However, you should also be aware of the drawbacks and risks of using the mod apk file, and make sure you download it from a reliable source. If you want to try this game, you can follow the steps we provided above to download and install it on your device.</p>
<h3>Why you should play Bad 2 Bad: Apocalypse Mod APK 1.2.4</h3>
<p>You should play <strong>Bad 2 Bad: Apocalypse Mod APK 1.2.4</strong> because:</p>
<ul>
<li>It is a fun and action-packed role-playing game that will keep you entertained for hours.</li>
<li>It has a rich and immersive story that will keep you hooked from the start.</li>
<li>It has simple and intuitive gameplay that will suit any player.</li>
<li>It has a wide variety of characters, weapons, and skills that you can choose from and customize.</li>
<li>It has stunning graphics that will impress you with their detail and animations.</li>
<li>It has mod features that give you unlimited money, unlocked characters, and no ads in the game.</li>
</ul>
<h3>FAQs</h3>
<p>Here are some frequently asked questions about <strong>Bad 2 Bad: Apocalypse Mod APK 1.2.4</strong>:</p>
<ol>
<li><strong>Is Bad 2 Bad: Apocalypse Mod APK 1.2.4 safe to use?</strong></li>
<p>Yes, it is safe to use if you download it from a reliable source that offers a safe and secure download link. However, you should always be careful when downloading and installing any mod apk file from an unknown or untrusted source, as it may contain viruses, malware, spyware, or other harmful software that can damage your device or steal your personal information.</p>
<li><strong>Is Bad 2 Bad: Apocalypse Mod APK 1.2.4 legal to use?</strong></li>
<p>No, it is not legal to use, as it violates the terms and conditions of the original game developer. By using the mod apk file, you are modifying the original game files and accessing features that are not authorized or intended by the developer. This can result in legal actions or penalties from the developer or other parties involved.</p>
<li><strong>Does Bad 2 Bad: Apocalypse Mod APK 1.2.4 require root access?</strong></li>
<p>No, it does not require root access, as it works on any Android device without rooting. However, some devices may need to enable unknown sources on their settings to allow the installation of apps from sources other than the Google Play Store.</p>
<li><strong>Does Bad 2 Bad: Apocalypse Mod APK 1.2.4 work offline?</strong></li>
<p>Yes, it works offline, as it does not require an internet connection to play the game. However, some features or functions may not work properly or at all without an internet connection, such as online multiplayer mode or cloud save.</p>
<li><strong>Can I update Bad 2 Bad: Apocalypse Mod APK 1.2.4?</strong></li>
<p>No, you cannot update it, as it is not compatible with the official version of the game. If you want to update the game, you need to uninstall the mod apk file and install the official version from the Google Play Store. However, you may lose your progress and data when you do this.</p>
</ol>
spaces/1phancelerku/anime-remove-background/AirReceiver for Windows 10 The Best Way to Stream Music Video and Photos.md
DELETED
@@ -1,144 +0,0 @@
<h1>AirReceiver Windows 10 Download: How to Mirror Your Screen to Your PC</h1>
<p>Have you ever wanted to mirror your screen from your iPhone, iPad, Android, Chromebook, or Windows 10 device to your PC? If so, you might be interested in AirReceiver, a multi-protocol receiver app that allows you to do just that. In this article, we will explain what AirReceiver is, why you should use it, and how to download and install it on your Windows 10 PC.</p>
<h2>What is AirReceiver?</h2>
<p>AirReceiver is an app that turns your Windows 10 PC into a universal mirroring receiver, allowing you to receive streams from any device that supports AirPlay, Google Cast, or Miracast. These are the three most popular screen mirroring technologies that let you wirelessly project your screen to another device, such as a TV, a projector, or a PC.</p>
<h2>airreceiver windows 10 download</h2><br /><p><b><b>Download File</b> ✅ <a href="https://jinyurl.com/2uNNy7">https://jinyurl.com/2uNNy7</a></b></p><br /><br />
<h3>AirReceiver Features</h3>
<p>Some of the features of AirReceiver are:</p>
<ul>
<li>It supports multiple protocols, including AirPlay, Google Cast, and Miracast.</li>
<li>It supports multiple devices, including iOS, Android, Chromebook, Mac, and Windows 10.</li>
<li>It supports multiple streams simultaneously, so you can mirror more than one device at the same time.</li>
<li>It supports live streaming to YouTube with picture-in-picture webcam support.</li>
<li>It supports encrypted AirPlay for secure transmission of your personal data.</li>
<li>It supports retina quality mirroring for high-resolution displays.</li>
</ul>
<h3>AirReceiver Price</h3>
<p>AirReceiver is not a free app. It costs $2.99 on the Microsoft Store. However, you can try it for free for 15 minutes before you buy it. You can also get a refund within 14 days if you are not satisfied with the app.</p>
<h2>Why Use AirReceiver?</h2>
<p>There are many reasons why you might want to use AirReceiver to mirror your screen to your PC. Here are some of them:</p>
<h3>Benefits of Screen Mirroring</h3>
<ul>
<li>You can enjoy a bigger and better view of your content on your PC monitor or TV.</li>
<li>You can share your screen with others easily and conveniently.</li>
<li>You can record or capture your screen for later use or reference.</li>
<li>You can enhance your productivity and creativity by using multiple screens.</li>
<li>You can play games or watch videos on a larger screen with better sound quality.</li>
</ul>
<h3>Use Cases of AirReceiver</h3>
<p>Some of the use cases of AirReceiver are:</p>
<ul>
<li>You can use it for entertainment purposes, such as watching movies or shows, viewing photos, or listening to music from your device on your PC.</li>
<li>You can use it for educational purposes, such as presenting slides, documents, or websites from your device on your PC.</li>
<li>You can use it for business purposes, such as demonstrating apps, software, or products from your device on your PC.</li>
<li>You can use it for gaming purposes, such as playing mobile games on your PC with a keyboard and mouse.</li>
<li>You can use it for personal purposes, such as browsing social media, chatting with friends, or checking emails from your device on your PC.</li>
</ul>
<h2>How to Download and Install AirReceiver on Windows 10</h2>
<p>Downloading and installing AirReceiver on your Windows 10 PC is very easy and straightforward. Just follow these steps:</p>
<h3>Step 1: Get AirReceiver from the Microsoft Store</h3>
<p>The first step is to get AirReceiver from the Microsoft Store. You can do this by clicking on this link: <a href="">AirReceiver on Microsoft Store</a>. Alternatively, you can search for AirReceiver on the Microsoft Store app on your PC. Once you find the app, click on the Get button to purchase it. You can also try it for free for 15 minutes before you buy it.</p>
<p>airreceiver windows 10 app store<br />
airreceiver windows 10 free trial<br />
airreceiver windows 10 review<br />
airreceiver windows 10 alternative<br />
airreceiver windows 10 setup<br />
airreceiver windows 10 not working<br />
airreceiver windows 10 crack<br />
airreceiver windows 10 license key<br />
airreceiver windows 10 miracast<br />
airreceiver windows 10 chromecast<br />
airreceiver windows 10 airplay<br />
airreceiver windows 10 wireless display<br />
airreceiver windows 10 screen mirroring<br />
airreceiver windows 10 cast to device<br />
airreceiver windows 10 connect to iphone<br />
airreceiver windows 10 connect to android<br />
airreceiver windows 10 connect to mac<br />
airreceiver windows 10 connect to ipad<br />
airreceiver windows 10 connect to chromebook<br />
airreceiver windows 10 connect to firestick<br />
airreceiver windows 10 vs airserver<br />
airreceiver windows 10 vs lonelyscreen<br />
airreceiver windows 10 vs reflector<br />
airreceiver windows 10 vs apowermirror<br />
airreceiver windows 10 vs x-mirage<br />
how to use airreceiver windows 10<br />
how to install airreceiver windows 10<br />
how to uninstall airreceiver windows 10<br />
how to update airreceiver windows 10<br />
how to activate airreceiver windows 10<br />
how to fix airreceiver windows 10<br />
how to stream with airreceiver windows 10<br />
how to mirror with airreceiver windows 10<br />
how to cast with airreceiver windows 10<br />
how to project with airreceiver windows 10<br />
is airreceiver compatible with windows 10<br />
is airreceiver safe for windows 10<br />
is airreceiver worth it for windows 10<br />
is there a free version of airreceiver for windows 10<br />
is there a better option than airreceiver for windows 10<br />
what is the best setting for airreceiver on windows 10<br />
what is the difference between airserver and airreceiver on windows 10<br />
what devices can i use with airreceiver on windows 10<br />
what protocols does airreceiver support on windows 10<br />
what are the benefits of using airreceiver on windows 10<br />
where can i download the latest version of airreceiver for windows 10<br />
where can i find the user manual for airreceiver for windows 10<br />
where can i get help with troubleshooting for airreceiver for windows 10</p>
<h3>Step 2: Launch AirReceiver and Configure Settings</h3>
<p>The second step is to launch AirReceiver and configure its settings. You can do this by clicking on the Start menu and typing AirReceiver. Then, click on the app icon to open it. You will see a window with a list of available receivers. You can rename them, delete them, or add new ones by clicking on the buttons at the bottom. You can also access the settings menu by clicking on the gear icon at the top right corner. Here, you can adjust various options, such as resolution, audio, network, hardware accelerator, and more.</p>
<h3>Step 3: Connect Your Device to AirReceiver</h3>
<p>The third step is to connect your device to AirReceiver. Depending on what device and protocol you are using, the steps may vary slightly. Here are the general steps for each protocol:</p>
<h4>How to Connect via AirPlay</h4>
<ul>
<li>Make sure your device and your PC are connected to the same Wi-Fi network.</li>
<li>On your iOS or Mac device, swipe up from the bottom or click on the Control Center icon.</li>
<li>Tap or click on the Screen Mirroring or AirPlay icon.</li>
<li>Select your PC name from the list of available receivers.</li>
<li>If prompted, enter the PIN code that appears on your PC screen.</li>
<li>Your device screen should now be mirrored to your PC.</li>
</ul>
<h4>How to Connect via Google Cast</h4>
<ul>
<li>Make sure your device and your PC are connected to the same Wi-Fi network.</li>
<li>On your Android or Chromebook device, swipe down from the top or click on the Quick Settings icon.</li>
<li>Tap or click on the Cast or Screen Cast icon.</li>
<li>Select your PC name from the list of available receivers.</li>
<li>Your device screen should now be mirrored to your PC.</li>
</ul>
<h4>How to Connect via Miracast</h4>
<ul>
<li>Make sure your device and your PC are connected to the same Wi-Fi network.</li>
<li>On your Windows 10 device, press the Windows key + K to open the Connect menu.</li>
<li>Select your PC name from the list of available receivers.</li>
<li>If prompted, allow your device to connect to your PC.</li>
<li>Your device screen should now be mirrored to your PC.</li>
</ul>
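<p>A side note on how your PC shows up in those receiver lists at all: AirPlay and Google Cast receivers announce themselves on the local network over mDNS/Bonjour, as <code>_airplay._tcp</code> and <code>_googlecast._tcp</code> services. The Kotlin sketch below browses for those announcements using the open-source JmDNS library; it illustrates the discovery step only and is not code from AirReceiver.</p>

```kotlin
import java.net.InetAddress
import javax.jmdns.JmDNS
import javax.jmdns.ServiceEvent
import javax.jmdns.ServiceListener

// Browse the local network for AirPlay and Google Cast receivers, which
// advertise themselves via mDNS/Bonjour. Requires the org.jmdns:jmdns library.
fun main() {
    val jmdns = JmDNS.create(InetAddress.getLocalHost())
    val listener = object : ServiceListener {
        override fun serviceAdded(event: ServiceEvent) {
            // Ask JmDNS to resolve the service so serviceResolved() fires.
            jmdns.requestServiceInfo(event.type, event.name)
        }
        override fun serviceRemoved(event: ServiceEvent) {}
        override fun serviceResolved(event: ServiceEvent) {
            val address = event.info.inetAddresses.firstOrNull()
            println("${event.info.name} -> $address:${event.info.port}")
        }
    }
    jmdns.addServiceListener("_airplay._tcp.local.", listener)
    jmdns.addServiceListener("_googlecast._tcp.local.", listener)
    Thread.sleep(10_000) // collect announcements for ten seconds
    jmdns.close()
}
```

<p>Miracast is the odd one out here: it pairs over Wi-Fi Direct rather than mDNS, which is why it gets its own connection flow in Windows.</p>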
<h2>Troubleshooting Tips for AirReceiver</h2>
<p>Sometimes, you may encounter some issues or errors when using AirReceiver. Here are some troubleshooting tips that may help you fix them:</p>
<h3>Check Your Network Connection</h3>
<p>One of the most common causes of screen mirroring problems is a poor or unstable network connection. Make sure that both your device and your PC are connected to the same Wi-Fi network and that the signal is strong and consistent. You can also try restarting your router or modem if you suspect that there is a problem with your network.</p>
<h3>Update Your Device and AirReceiver App</h3>
<p>Another possible cause of screen mirroring problems is an outdated device or app. Make sure that both your device and your AirReceiver app are updated to the latest version and that they are compatible with each other. You can check for updates on your device settings or on the Microsoft Store app for AirReceiver.</p>
<h3>Disable Hardware Accelerator in AirReceiver Settings</h3>
<p>Sometimes, enabling hardware accelerator in AirReceiver settings may cause some issues or errors with screen mirroring. This is because some devices or graphics cards may not support this feature well. If you experience any problems with hardware accelerator, you can try disabling it in AirReceiver settings and see if that solves the issue.</p>
<h2>Conclusion</h2>
<p>AirReceiver is a great app that allows you to mirror your screen from any device that supports AirPlay, Google Cast, or Miracast to your Windows 10 PC. It has many features and benefits that make it worth trying. It is also easy to download and install on your PC and connect with your device. If you are looking for a simple and effective way to mirror your screen to your PC, you should give AirReceiver a try. You will not regret it.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about AirReceiver:</p>
<ol>
<li>What devices are compatible with AirReceiver?</li>
<p>AirReceiver is compatible with any device that supports AirPlay, Google Cast, or Miracast. This includes iOS, Android, Chromebook, Mac, and Windows 10 devices.</p>
<li>How many devices can I connect to AirReceiver at the same time?</li>
<p>AirReceiver can support up to 16 devices simultaneously. However, this may depend on your network bandwidth and PC performance.</p>
<li>Can I use AirReceiver without Wi-Fi?</li>
<p>No, you need a Wi-Fi connection to use AirReceiver. Both your device and your PC must be connected to the same Wi-Fi network.</p>
<li>Can I use AirReceiver for audio only?</li>
<p>Yes, you can use AirReceiver for audio only. You can select the Audio Only option in the settings menu of AirReceiver. This will reduce the bandwidth usage and improve the audio quality.</p>
<li>Can I use AirReceiver offline?</li>
<p>No, you need an internet connection to use AirReceiver. You need to download and install the app from the Microsoft Store and activate it with your Microsoft account.</p>
</ol>
spaces/1phancelerku/anime-remove-background/Como baixar o Roblox APK Mod e aproveitar ao mximo o seu celular.md
DELETED
@@ -1,93 +0,0 @@
<h1>Download Roblox APK Mod: Everything you need to know</h1>
<p>Roblox is an online gaming platform that lets you create and play your own 3D games. With millions of users and an endless variety of immersive experiences created by the community, Roblox is one of the most popular apps on the planet. But did you know there is a modified version of Roblox that offers even more features and advantages? In this article, we will explain what the Roblox mod is, what its benefits are, how to download and install it on your Android device, what the risks of using it are, and what alternatives are available.</p>
<h2>What is the Roblox mod and what are its benefits?</h2>
<p>The Roblox mod is a free app that lets you customize your character, create and explore your own worlds, join a large online community of other players, and battle other players in mini-games and online tournaments. With the Roblox mod, you can unlock features that are not available in the official version of Roblox, such as:</p>
<h2>download roblox apk mod</h2><br /><p><b><b>Download File</b> ✔ <a href="https://jinyurl.com/2uNTzs">https://jinyurl.com/2uNTzs</a></b></p><br /><br />
<ul>
<li>Unlimited customization options for your avatar, with tons of hats, shirts, faces, gear, and much more.</li>
<li>Unlimited Robux, the virtual currency of Roblox that lets you buy exclusive items and accessories.</li>
<li>Game hacks, such as aim bot, wall hacks, speed hacks, flying hacks, and other tricks that make winning easier.</li>
<li>Integration with Discord, the voice and text communication app most used by gamers.</li>
</ul>
<p>With these benefits, you can create the character you always wanted to be and never could before, explore amazing worlds created by other users or by yourself, interact with your friends and millions of other people across different platforms, and have much more fun in the virtual universe of Roblox.</p>
<h2>How to download and install the Roblox mod on your Android device?</h2>
<p>Downloading and installing the Roblox mod on your Android device is very easy. Just follow these steps:</p>
<ol>
<li>Download the Roblox mod APK file from a trusted website, such as .</li>
<li>Copy the downloaded APK file to your Android device.</li>
<li>Open any file manager app on your device and go to the location where you copied the APK file.</li>
<li>Tap the APK file and select "Install".</li>
<li>Wait for the installation process to finish.</li>
</ol>
<p>Done! Now you can open the Roblox mod and enjoy all its features. Remember that you need an internet connection to log in. The Roblox mod works best over Wi-Fi.</p>
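<p>One detail the steps above gloss over: since Android 8.0, the old global "unknown sources" toggle has been replaced by a per-app "install unknown apps" permission, so the file manager you use in step 3 must itself be allowed to install packages. A minimal Kotlin sketch of that check, as a hypothetical file manager might perform it, is shown below.</p>

```kotlin
import android.content.Context
import android.content.Intent
import android.net.Uri
import android.os.Build
import android.provider.Settings

// Check whether this app may install packages; if not, open the system
// settings page where the user can grant the "install unknown apps" permission.
fun ensureInstallPermission(context: Context) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O &&
        !context.packageManager.canRequestPackageInstalls()
    ) {
        val intent = Intent(
            Settings.ACTION_MANAGE_UNKNOWN_APP_SOURCES,
            Uri.parse("package:${context.packageName}")
        ).addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
        context.startActivity(intent)
    }
}
```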
<h2>What are the risks of using the Roblox mod?</h2>
<p>Despite the benefits of the Roblox mod, there are also some risks you should be aware of before using it. Some of the risks of using the Roblox mod are:</p>
<ul>
<li>Violation of Roblox's terms of service. By using the Roblox mod, you are breaking Roblox's rules, which prohibit the use of any unauthorized software or app that modifies or interferes with the normal operation of Roblox. This can result in legal or disciplinary consequences, such as the suspension or deletion of your account.</li>
<li>Suspension or deletion of your account. If Roblox detects that you are using the Roblox mod, it can ban your account permanently or temporarily, depending on the severity of the infraction. This means you will lose all your progress, items, friends, and creations on Roblox.</li>
<li>Cyberbullying or unwanted contact. By using the Roblox mod, you may expose yourself to cyberbullying or unwanted contact from other users who dislike cheaters or who want to take advantage of you. You may receive offensive messages, threats, inappropriate invitations, or requests for personal information. You should always be careful about who you interact with online and report any abusive behavior.</li>
<li>Malware or viruses. By downloading and installing the Roblox mod, you may be putting the security of your device and your data at risk. Some websites that offer the Roblox mod may contain malware or viruses that can damage your device, steal your information, access your camera or microphone, display unwanted ads, or redirect you to malicious sites. You should always check the reputation and trustworthiness of the website before downloading any file and use an antivirus to protect your device.</li>
</ul>
<p>Therefore, before using the Roblox mod, you should be aware of the risks involved and the possible consequences. You should also respect the copyrights and intellectual property of Roblox and its creators, as well as the rules of online conduct and safety.</p>
<h2>What are the alternatives to the Roblox mod?</h2>
<p>If you want to enjoy games similar to Roblox without using the Roblox mod, there are some alternatives you can try. Some of them are:</p>
<p>how to download roblox apk mod menu<br />
download roblox apk mod updated 2023<br />
download roblox apk mod unlimited money<br />
download roblox apk mod everything unlocked<br />
download roblox apk mod hack<br />
download roblox apk mod for android<br />
download roblox apk mod with free robux<br />
download roblox apk mod latest version<br />
download roblox apk mod without ads<br />
download roblox apk mod with premium skins<br />
download roblox apk mod online<br />
download roblox apk mod offline<br />
download roblox apk mod mega<br />
download roblox apk mod mediafire<br />
download roblox apk mod via google drive<br />
download roblox apk mod on pc<br />
download roblox apk mod on mobile<br />
download roblox apk mod on aptoide<br />
download roblox apk mod on uptodown<br />
download roblox apk mod on happy mod<br />
download roblox apk mod with fly hack<br />
download roblox apk mod with wall hack<br />
download roblox apk mod with speed hack<br />
download roblox apk mod with god mode<br />
download roblox apk mod with night mode<br />
download roblox apk mod with troll features<br />
download roblox apk mod with jump cheat<br />
download roblox apk mod with pass through walls<br />
download roblox apk mod with lock camera pov<br />
download roblox apk mod with anti ban<br />
download roblox apk mod for horror games<br />
download roblox apk mod for adventure games<br />
download roblox apk mod for simulation games<br />
download roblox apk mod for action games<br />
download roblox apk mod for racing games<br />
download roblox apk mod for shooting games<br />
download roblox apk mod for rpg games<br />
download roblox apk mod for sports games<br />
download roblox apk mod for music games<br />
download roblox apk mod for education games</p>
<ul>
<li>Minetest. Minetest is a 3D sandbox game inspired by Minecraft that lets you create and explore infinite worlds made of blocks. You can play alone or with other players online, create your own games and mods, and customize your character with skins and textures. Minetest is free and open source, available for Windows, Linux, Mac OS X, and Android.</li>
<li>Garry's Mod. Garry's Mod is a 3D physics game that lets you manipulate objects and characters from various Valve games, such as Half-Life 2, Team Fortress 2, and Counter-Strike. You can create your own scenes, animations, vehicles, weapons, and much more. You can also play with other players online in various game modes, such as Prop Hunt, Trouble in Terrorist Town, and Sandbox. Garry's Mod is paid and available for Windows, Linux, and Mac OS X.</li>
<li>World to Build. World to Build is an online gaming platform that lets you create and play your own 3D games. You can use simple and intuitive tools to build your worlds and add scripts, sounds, textures, and effects. You can also interact with other players online in different game genres, such as action, adventure, RPG, and much more. World to Build is free and available for Windows.</li>
</ul>
<h2>Conclusion</h2>
<p>In this article, we explained what the Roblox mod is, what its benefits are, how to download and install it on your Android device, what the risks of using it are, and what alternatives are available. We hope this article was useful to you and that you learned something new about the Roblox universe.</p>
<p>The Roblox mod is a modified version of Roblox that offers extra features and advantages for players, such as unlimited customization options, unlimited Robux, game hacks, and Discord integration. However, using the Roblox mod also has its risks, such as violating Roblox's terms of service, having your account suspended or deleted, cyberbullying or unwanted contact, and malware or viruses. That is why it is important to be aware of the consequences and the alternatives before using the Roblox mod. If you want to play games similar to Roblox without using the Roblox mod, you can try Minetest, Garry's Mod, or World to Build, which are gaming platforms that let you create and play your own 3D games.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about the Roblox mod:</p>
<ol>
<li>What is Roblox? <br>Roblox is an online gaming platform that lets you create and play your own 3D games. With millions of users and an endless variety of immersive experiences created by the community, Roblox is one of the most popular apps on the planet.</li>
<li>What is the Roblox mod? <br>The Roblox mod is a modified version of Roblox that offers extra features and advantages for players, such as unlimited customization options, unlimited Robux, game hacks, and Discord integration.</li>
<li>How do I download and install the Roblox mod on my Android device? <br>Downloading and installing the Roblox mod on your Android device is very easy. Just follow these steps: <br>- Download the Roblox mod APK file from a trusted website, such as . <br>- Copy the downloaded APK file to your Android device. <br>- Open any file manager app on your device and go to the location where you copied the APK file. <br>- Tap the APK file and select "Install". <br>- Wait for the installation process to finish.</li>
<li>What are the risks of using the Roblox mod? <br>Some of the risks of using the Roblox mod are: <br>- Violation of Roblox's terms of service. By using the Roblox mod, you are breaking Roblox's rules, which prohibit the use of any unauthorized software or app that modifies or interferes with the normal operation of Roblox. This can result in legal or disciplinary consequences, such as the suspension or deletion of your account. <br>- Suspension or deletion of your account. If Roblox detects that you are using the Roblox mod, it can ban your account permanently or temporarily, depending on the severity of the infraction. This means you will lose all your progress, items, friends, and creations on Roblox. <br>- Cyberbullying or unwanted contact. By using the Roblox mod, you may expose yourself to cyberbullying or unwanted contact from other users who dislike cheaters or who want to take advantage of you. You may receive offensive messages, threats, inappropriate invitations, or requests for personal information. You should always be careful about who you interact with online and report any abusive behavior. <br>- Malware or viruses. By downloading and installing the Roblox mod, you may be putting the security of your device and your data at risk. Some websites that offer the Roblox mod may contain malware or viruses that can damage your device, steal your information, access your camera or microphone, display unwanted ads, or redirect you to malicious sites. You should always check the reputation and trustworthiness of the website before downloading any file and use an antivirus to protect your device.</li>
<li>What are the alternatives to the Roblox mod? <br>If you want to enjoy games similar to Roblox without using the Roblox mod, there are some alternatives you can try. Some of them are: <br>- Minetest. A 3D sandbox game inspired by Minecraft that lets you create and explore infinite worlds made of blocks. It is free and open source, available for Windows, Linux, Mac OS X, and Android. <br>- Garry's Mod. A 3D physics game that lets you manipulate objects and characters from various Valve games, with game modes such as Prop Hunt, Trouble in Terrorist Town, and Sandbox. It is paid and available for Windows, Linux, and Mac OS X. <br>- World to Build. An online gaming platform that lets you create and play your own 3D games, with simple and intuitive building tools. It is free and available for Windows.</li>
</ol>
spaces/1phancelerku/anime-remove-background/Fate Grand Order Mod Apk Unlimited Quartz 2022.md
DELETED
@@ -1,127 +0,0 @@
<h1>Fate/Grand Order Mod APK Unlimited Quartz 2022: How to Get It and Why You Need It</h1>
<p>If you are a fan of the Fate series, you probably have heard of or played Fate/Grand Order, one of the most popular mobile RPGs in the world. In this game, you can summon and command various Servants, who are legendary heroes from different historical periods and mythologies, and fight against enemies in epic battles. However, you may also have encountered some challenges in getting enough Saint Quartz, the premium currency of the game, to summon your favorite Servants. That's why you may be interested in Fate/Grand Order Mod APK Unlimited Quartz 2022, a modified version of the game that gives you unlimited Saint Quartz for free. In this article, we will explain what Fate/Grand Order is, what Saint Quartz is and why it is important, how to get Saint Quartz for free in the official game, what Fate/Grand Order Mod APK Unlimited Quartz 2022 is and how to get it, and some FAQs about the mod apk.</p>
<h2>fate grand order mod apk unlimited quartz 2022</h2><br /><p><b><b>Download Zip</b> ✺ <a href="https://jinyurl.com/2uNPPt">https://jinyurl.com/2uNPPt</a></b></p><br /><br />
<h2>What is Fate/Grand Order?</h2>
<h3>A popular mobile RPG based on the Fate series</h3>
<p>Fate/Grand Order is a mobile RPG developed by Delightworks and published by Aniplex. It is based on the Fate series, which is a multimedia franchise created by Type-Moon that includes visual novels, anime, manga, light novels, games, and more. The story of Fate/Grand Order revolves around Chaldea, a secret organization that monitors and preserves human history. However, Chaldea discovers that human history is facing extinction due to a mysterious anomaly in the year 2004. To prevent this, Chaldea sends its agents, called Masters, to various eras and regions in history, where they summon and team up with Servants, who are heroic spirits from different historical periods and mythologies. Together, they fight against enemies and restore the proper course of history.</p>
<h3>The gameplay and features of Fate/Grand Order</h3>
<p>The gameplay of Fate/Grand Order consists of two main modes: story mode and event mode. In story mode, you follow the main storyline of the game and travel to different eras and regions in history, where you encounter various characters and enemies from the Fate series. In event mode, you participate in limited-time events that offer special rewards and challenges. The combat system of Fate/Grand Order is turn-based and card-based. You can choose up to six Servants to form your party, and each Servant has five cards that represent their attacks and skills. You can select up to three cards per turn, and depending on the combination of cards, you can activate different effects and bonuses. Each Servant also has a unique Noble Phantasm, which is a powerful special attack that can be unleashed when their gauge is full. The game features hundreds of Servants to collect and customize, each with their own stats, skills, classes, and personalities. You can also interact with your Servants in the My Room feature, where you can chat with them, give them gifts, and bond with them. The game also has a social aspect, where you can add other players as friends and borrow their Servants for your missions.</p>
<h2>What is Saint Quartz and Why is it Important?</h2>
<h3>The premium currency of Fate/Grand Order</h3>
<p>Saint Quartz is the premium currency of Fate/Grand Order, which means that it is the most valuable and rare resource in the game. You can use Saint Quartz for various purposes, such as:</p>
<ul>
<li>Summoning new Servants and Craft Essences, which are items that enhance the abilities of your Servants.</li>
<li>Refilling your Action Points (AP), which are required to start missions.</li>
<li>Reviving your party when they are defeated in battle.</li>
<li>Expanding your inventory and storage space.</li>
</ul>
<p>Saint Quartz can be obtained by spending real money or by completing certain tasks and achievements in the game. However, the amount of Saint Quartz that you can get for free is very limited and not enough to satisfy your summoning needs.</p>
<h3>The uses and benefits of Saint Quartz</h3>
<p>The main use and benefit of Saint Quartz is to summon new Servants and Craft Essences. The summoning system of Fate/Grand Order is based on a gacha mechanic, which means that you have to spend Saint Quartz to randomly draw from a pool of available Servants and Craft Essences. The quality and rarity of the Servants and Craft Essences that you can get vary depending on the type of summoning banner that you choose. There are three types of summoning banners:</p>
<table>
<tr>
<th>Type</th>
<th>Description</th>
<th>Cost</th>
</tr>
<tr>
<td>Friend Point Summon</td>
<td>This banner allows you to summon low-rarity Servants and Craft Essences using Friend Points, which are earned by interacting with other players. This banner does not require Saint Quartz, but it also does not offer any high-rarity Servants or Craft Essences.</td>
<td>200 Friend Points per summon</td>
</tr>
<tr>
<td>Story Summon</td>
<td>This banner allows you to summon Servants and Craft Essences from the main story chapters that you have cleared. This banner has a large pool of Servants and Craft Essences, but it does not have any rate-up or guaranteed features, which means that the chances of getting a specific or high-rarity Servant or Craft Essence are very low.</td>
<td>3 Saint Quartz or 1 Summon Ticket per summon</td>
</tr>
<tr>
<td>Limited-Time Summon</td>
<td>This banner allows you to summon Servants and Craft Essences that are featured for a limited period of time. This banner usually has a rate-up feature, which means that the chances of getting a specific or high-rarity Servant or Craft Essence are increased. This banner also sometimes has a guaranteed feature, which means that you are guaranteed to get at least one high-rarity Servant or Craft Essence per summon. However, this banner also has a higher cost than the Story Summon.</td>
<td>30 Saint Quartz or 10 Summon Tickets per 10x summon</td>
</tr>
</table>
<p>As you can see, Saint Quartz is very important for summoning new Servants and Craft Essences, which can help you progress in the game and enjoy the story and events. However, Saint Quartz is also very scarce and expensive, which can make summoning a frustrating and disappointing experience. That's why many players are looking for ways to get Saint Quartz for free in Fate/Grand Order.</p>
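<p>As a quick worked example of the costs in this table: a plan of ten Limited-Time 10x summons costs 10 × 30 = 300 Saint Quartz. The Kotlin sketch below does that arithmetic; the 1% 5-star rate used for the expectation is an illustrative assumption, not a figure taken from the table.</p>

```kotlin
// Worked example of the banner costs above. The 5-star rate is an
// illustrative assumption, not a figure from the table.
fun main() {
    val quartzPerMulti = 30            // Limited-Time Summon, 10x summon
    val plannedMultis = 10             // i.e. 100 individual summons
    val totalQuartz = quartzPerMulti * plannedMultis
    val assumedFiveStarRate = 0.01     // hypothetical 1% per summon
    val expectedFiveStars = plannedMultis * 10 * assumedFiveStarRate
    println("Saint Quartz needed: $totalQuartz")         // 300
    println("Expected 5-star pulls: $expectedFiveStars") // 1.0
}
```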
<h2>How to Get Saint Quartz for Free in Fate/Grand Order?</h2>
<h3>The official ways to earn Saint Quartz</h3>
<p>There are some official ways to earn Saint Quartz for free in Fate/Grand Order, such as:</p>
<ul>
<li>Completing main story chapters and interludes.</li>
<li>Clearing free quests and daily quests.</li>
<li>Logging in daily and weekly.</li>
<li>Participating in events and campaigns.</li>
<li>Bonding with your Servants.</li>
<li>Achieving certain milestones and records.</li>
</ul>
<p>These methods can give you a steady but slow income of Saint Quartz, which can add up over time. However, they also have some drawbacks and limitations that make them insufficient for satisfying your summoning needs.</p>
<h3>The drawbacks and limitations of the official ways</h3>
<p>The official ways to earn Saint Quartz for free in Fate/Grand Order have some drawbacks and limitations, such as:</p>
<ul>
<li>They are time-consuming and require a lot of grinding and patience.</li>
<li>They are finite and depend on the availability of new content and updates.</li>
<li>They are unpredictable and vary in quantity and frequency.</li>
<li>They are not enough to guarantee getting the Servants and Craft Essences that you want, especially if they are high-rarity or limited-time ones.</li>
</ul>
<p>Therefore, relying on the official ways to earn Saint Quartz for free in Fate/Grand Order can be frustrating and disappointing, especially if you have bad luck or high expectations. That's why some players are looking for alternative ways to get Saint Quartz for free in Fate/Grand Order, such as using Fate/Grand Order Mod APK Unlimited Quartz 2022.</p>
<h2>What is Fate/Grand Order Mod APK Unlimited Quartz 2022?</h2>
<h3>A modified version of the game that gives unlimited Saint Quartz</h3>
<p>Fate/Grand Order Mod APK Unlimited Quartz 2022 is a modified version of the game that gives you unlimited Saint Quartz for free. This means that you can summon as many Servants and Craft Essences as you want without spending any real money or waiting for any official rewards. You can also use the unlimited Saint Quartz to refill your AP, revive your party, expand your inventory, and more. With Fate/Grand Order Mod APK Unlimited Quartz 2022, you can enjoy the game without any restrictions or limitations.</p>
<h3>The advantages and disadvantages of using Fate/Grand Order Mod APK Unlimited Quartz 2022</h3>
<p>Fate/Grand Order Mod APK Unlimited Quartz 2022 has some advantages and disadvantages that you should consider before using it. Some of the advantages are:</p>
<ul>
<li>You can get unlimited Saint Quartz for free, which can save you a lot of money and time.</li>
<li>You can summon any Servant or Craft Essence that you want, which can help you progress in the game and enjoy the story and events.</li>
<li>You can experiment with different combinations of Servants and Craft Essences, which can enhance your gameplay experience and fun.</li>
</ul>
<p>Some of the disadvantages are:</p>
<ul>
<li>You may lose the challenge and excitement of the game, which can make it boring and repetitive.</li>
<li>You may lose the satisfaction and joy of getting a rare or desired Servant or Craft Essence through hard work or luck.</li>
<li>You may encounter some technical issues or errors with the mod apk, which can affect your gameplay performance or quality.</li>
<li>You may violate the terms of service or rules of the game, which can result in your account being banned or suspended.</li>
</ul>
<p>Therefore, using Fate/Grand Order Mod APK Unlimited Quartz 2022 has its pros and cons, and you should weigh them carefully before deciding to use it. You should also be aware of the risks and consequences of using the mod apk, and take the necessary precautions to protect your account and device.</p>
<h2>How to Download and Install Fate/Grand Order Mod APK Unlimited Quartz 2022?</h2>
<h3>The steps to download and install the mod apk</h3>
<p>If you have decided to use Fate/Grand Order Mod APK Unlimited Quartz 2022, you will need to follow these steps to download and install it on your device:</p>
<ol>
<li>Uninstall the original version of Fate/Grand Order from your device.</li>
<li>Go to a trusted website that provides the link to download Fate/Grand Order Mod APK Unlimited Quartz 2022. You can search for it on Google or Bing, or use one of these links: .</li>
<li>Download the mod apk file from the website. Make sure that you have enough storage space on your device.</li>
<li>Enable the installation of apps from unknown sources on your device. You can do this by going to your device settings, then security, and allowing unknown sources.</li>
<li>Locate the mod apk file on your device and tap on it to start the installation process.</li>
<li>Follow the instructions on the screen and wait for the installation to finish.</li>
<li>Launch the mod apk and enjoy unlimited Saint Quartz in Fate/Grand Order.</li>
</ol>
<h3>The precautions and risks of using the mod apk</h3>
<p>While using Fate/Grand Order Mod APK Unlimited Quartz 2022 can be fun and convenient, it also comes with some precautions and risks that you should be aware of. Some of them are:</p>
<ul>
<li>You should always back up your data before using the mod apk, in case something goes wrong or you want to switch back to the original version of the game.</li>
<li>You should always use a VPN or proxy service when using the mod apk, to hide your IP address and location from the game servers and avoid detection.</li>
<li>You should always use a secondary or dummy account when using the mod apk, to avoid losing your main account or getting banned or suspended.</li>
<li>You should always scan the mod apk file for viruses or malware before installing it on your device, to prevent any damage or harm to your device or data. A simple integrity check is sketched right after this list.</li>
<li>You should always update the mod apk whenever there is a new version available, to ensure compatibility and functionality with the latest version of the game.</li>
</ul>
|
104 |
-
<h2>Conclusion</h2>
|
105 |
-
<h3>A summary of the main points of the article</h3>
|
106 |
-
<p>In conclusion, Fate/Grand Order Mod APK Unlimited Quartz 2022 is a modified version of the game that gives you unlimited Saint Quartz for free. This can help you summon any Servant or Craft Essence that you want, and enjoy the game without any restrictions or limitations. However, using Fate/Grand Order Mod APK Unlimited Quartz 2022 also has some drawbacks and risks, such as losing the challenge and excitement of the game, violating the terms of service or rules of the game, encountering technical issues or errors with the mod apk, and risking your account or device. Therefore, you should weigh the pros and cons carefully before deciding to use Fate/Grand Order Mod APK Unlimited Quartz 2022, and take the necessary precautions to protect your account and device.</p>
|
107 |
-
<h3>A call to action for the readers</h3>
|
108 |
-
<p>If you are interested in trying out Fate/Grand Order Mod APK Unlimited Quartz 2022, you can download it from one of these links: . However, if you prefer to play Fate/Grand Order without using any mods or cheats, you can download the original version of the game from Google Play Store or App Store. Either way, we hope that you have fun and enjoy playing Fate/Grand Order!</p>
|
109 |
-
<h2>FAQs</h2>
|
110 |
-
<h4>Is Fate/Grand Order Mod APK Unlimited Quartz 2022 safe to use?</h4>
|
111 |
-
<p>Fate/Grand Order Mod APK Unlimited Quartz 2022 is not officially endorsed or supported by Delightworks or Aniplex, so there is no guarantee that it is safe or secure to use. It may contain viruses or malware that can harm your device or data, or it may not work properly with the latest version of the game. Therefore, you should always scan the mod apk file for viruses or malware before installing it on your device, and use a VPN or proxy service when using it online.</p>
|
112 |
-
<h4>Will I get banned for using Fate/Grand Order Mod APK Unlimited Quartz 2022?</h4>
|
113 |
-
<p>Fate/Grand Order Mod APK Unlimited Quartz 2022 is a mod or cheat that violates the terms of service or rules of the game, so there is a possibility that you will get banned or suspended for using it. The game developers and publishers have the right to monitor and detect any suspicious or abnormal activities on your account, and take appropriate actions to prevent any unfair or illegal practices. Therefore, you should always use a secondary or dummy account when using Fate/Grand Order Mod APK Unlimited Quartz 2022, and avoid using it on your main account or with other players.</p>
|
114 |
-
<h4>Can I use Fate/Grand Order Mod APK Unlimited Quartz 2022 on iOS devices?</h4>
|
115 |
-
<p>Fate/Grand Order Mod APK Unlimited Quartz 2022 is an apk file, which means that it is only compatible with Android devices. You cannot use it on iOS devices, such as iPhones or iPads, unless you have an emulator or jailbreak that allows you to run Android apps on iOS devices. However, this may also cause some technical issues or errors with the mod apk or the game, and may also increase the risk of getting banned or suspended. Therefore, we do not recommend using Fate/Grand Order Mod APK Unlimited Quartz 2022 on iOS devices.</p>
|
116 |
-
<h4>How often is Fate/Grand Order Mod APK Unlimited Quartz 2022 updated?</h4>
|
117 |
-
<p>Fate/Grand Order Mod APK Unlimited Quartz 2022 is updated whenever there is a new version of the game available, or whenever there are new features or improvements added to the mod apk. However, the update frequency and availability may vary depending on the source or website that provides the mod apk. Some sources or websites may update the mod apk faster or more regularly than others, while some may not update it at all. Therefore, you should always check the source or website that you download the mod apk from, and make sure that it is reliable and trustworthy.</p>
|
118 |
-
<h4>Where can I find more information about Fate/Grand Order Mod APK Unlimited Quartz 2022?</h4>
|
119 |
-
<p>If you want to find more information about Fate/Grand Order Mod APK Unlimited Quartz 2022, such as its features, functions, screenshots, reviews, feedbacks, ratings, comments, tips, tricks, guides, tutorials, FAQs, and more, you can visit some of these websites:</p>
|
120 |
-
<ul>
|
121 |
-
<li> </li>
|
122 |
-
<li> </li>
|
123 |
-
<li> </li>
|
124 |
-
</ul>
|
125 |
-
<p>However, you should also be careful and cautious when visiting these websites, as some of them may contain viruses or malware that can harm your device or data, or some of them may provide false or misleading information that can confuse or mislead you. Therefore, you should always scan the websites for viruses or malware before accessing them, and use your own judgment and common sense when reading or following their information.</p> 197e85843d<br />
|
126 |
-
<br />
|
127 |
-
<br />
spaces/AIFILMS/StyleGANEX/utils/data_utils.py
DELETED
@@ -1,25 +0,0 @@
-"""
-Code adopted from pix2pixHD:
-https://github.com/NVIDIA/pix2pixHD/blob/master/data/image_folder.py
-"""
-import os
-
-IMG_EXTENSIONS = [
-    '.jpg', '.JPG', '.jpeg', '.JPEG',
-    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
-]
-
-
-def is_image_file(filename):
-    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
-
-
-def make_dataset(dir):
-    images = []
-    assert os.path.isdir(dir), '%s is not a valid directory' % dir
-    for root, _, fnames in sorted(os.walk(dir)):
-        for fname in fnames:
-            if is_image_file(fname):
-                path = os.path.join(root, fname)
-                images.append(path)
-    return images
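The file above is a small, self-contained helper: `is_image_file` does a suffix match against `IMG_EXTENSIONS`, and `make_dataset` walks a directory tree and collects every image path. A minimal usage sketch, assuming the repo layout implied by the path (`utils/data_utils.py`) and a hypothetical `./images` directory:

```python
from utils.data_utils import is_image_file, make_dataset

# Recursively collect every image path under a directory tree.
paths = make_dataset('./images')  # './images' is a placeholder directory
print('found %d images' % len(paths))

# The extension check is a plain suffix match against the listed extensions:
assert is_image_file('photo.JPEG')
assert not is_image_file('notes.txt')
```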
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/emotion/model.py
DELETED
@@ -1,78 +0,0 @@
-
-from data_gen.tts.emotion.params_model import *
-from data_gen.tts.emotion.params_data import *
-from torch.nn.utils import clip_grad_norm_
-from scipy.optimize import brentq
-from torch import nn
-import numpy as np
-import torch
-
-
-class EmotionEncoder(nn.Module):
-    def __init__(self, device, loss_device):
-        super().__init__()
-        self.loss_device = loss_device
-
-        # Network definition
-        self.lstm = nn.LSTM(input_size=mel_n_channels,
-                            hidden_size=model_hidden_size,
-                            num_layers=model_num_layers,
-                            batch_first=True).to(device)
-        self.linear = nn.Linear(in_features=model_hidden_size,
-                                out_features=model_embedding_size).to(device)
-        self.relu = torch.nn.ReLU().to(device)
-
-
-        # Cosine similarity scaling (with fixed initial parameter values)
-        self.similarity_weight = nn.Parameter(torch.tensor([10.])).to(loss_device)
-        self.similarity_bias = nn.Parameter(torch.tensor([-5.])).to(loss_device)
-
-        # Loss
-        self.loss_fn = nn.CrossEntropyLoss().to(loss_device)
-
-    def do_gradient_ops(self):
-        # Gradient scale
-        self.similarity_weight.grad *= 0.01
-        self.similarity_bias.grad *= 0.01
-
-        # Gradient clipping
-        clip_grad_norm_(self.parameters(), 3, norm_type=2)
-
-    def forward(self, utterances, hidden_init=None):
-        """
-        Computes the embeddings of a batch of utterance spectrograms.
-
-        :param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape
-        (batch_size, n_frames, n_channels)
-        :param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers,
-        batch_size, hidden_size). Will default to a tensor of zeros if None.
-        :return: the embeddings as a tensor of shape (batch_size, embedding_size)
-        """
-        # Pass the input through the LSTM layers and retrieve all outputs, the final hidden state
-        # and the final cell state.
-        out, (hidden, cell) = self.lstm(utterances, hidden_init)
-
-        # We take only the hidden state of the last layer
-        embeds_raw = self.relu(self.linear(hidden[-1]))
-
-        # L2-normalize it
-        embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
-
-        return embeds
-
-    def inference(self, utterances, hidden_init=None):
-        """
-        Computes the embeddings of a batch of utterance spectrograms.
-
-        :param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape
-        (batch_size, n_frames, n_channels)
-        :param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers,
-        batch_size, hidden_size). Will default to a tensor of zeros if None.
-        :return: the embeddings as a tensor of shape (batch_size, embedding_size)
-        """
-        # Pass the input through the LSTM layers and retrieve all outputs, the final hidden state
-        # and the final cell state.
-
-        out, (hidden, cell) = self.lstm(utterances, hidden_init)
-
-        return hidden[-1]
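For reference, the encoder above is a plain LSTM-plus-projection model: `forward` returns L2-normalized embeddings computed from the last layer's final hidden state, while `inference` returns the raw hidden state. A minimal sketch of running it, assuming `params_model.py` (not shown in this diff) defines `mel_n_channels=40`; the batch shape and channel count below are illustrative, not values from this commit:

```python
import torch
from data_gen.tts.emotion.model import EmotionEncoder  # repo-local import

device = loss_device = torch.device("cpu")
encoder = EmotionEncoder(device, loss_device)

# A batch of 4 utterances, 160 mel frames each, 40 mel channels
# (40 assumes mel_n_channels=40 in params_model.py).
mels = torch.randn(4, 160, 40)
embeds = encoder(mels)  # shape: (4, model_embedding_size)

# forward() L2-normalizes, so every row has unit norm.
print(embeds.shape, embeds.norm(dim=1))
```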
spaces/AILab-CVC/SEED-LLaMA/models/pipeline_stable_unclip_img2img.py
DELETED
@@ -1,794 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-import warnings
-from typing import Any, Callable, Dict, List, Optional, Union
-
-import PIL
-import torch
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
-
-from diffusers.utils.import_utils import is_accelerate_available
-
-from diffusers.image_processor import VaeImageProcessor
-
-from diffusers.image_processor import VaeImageProcessor
-from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
-from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.models.embeddings import get_timestep_embedding
-from diffusers.schedulers import KarrasDiffusionSchedulers
-from diffusers.utils import is_accelerate_version, logging, randn_tensor, replace_example_docstring
-from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
-
-logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-EXAMPLE_DOC_STRING = """
-    Examples:
-        ```py
-        >>> import requests
-        >>> import torch
-        >>> from PIL import Image
-        >>> from io import BytesIO
-
-        >>> from diffusers import StableUnCLIPImg2ImgPipeline
-
-        >>> pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
-        ...     "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
-        ... )  # TODO update model path
-        >>> pipe = pipe.to("cuda")
-
-        >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
-
-        >>> response = requests.get(url)
-        >>> init_image = Image.open(BytesIO(response.content)).convert("RGB")
-        >>> init_image = init_image.resize((768, 512))
-
-        >>> prompt = "A fantasy landscape, trending on artstation"
-
-        >>> images = pipe(prompt, init_image).images
-        >>> images[0].save("fantasy_landscape.png")
-        ```
-"""
-
-
-class StableUnCLIPImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
-    """
-    Pipeline for text-guided image-to-image generation using stable unCLIP.
-
-    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
-    implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
-    Args:
-        feature_extractor ([`CLIPImageProcessor`]):
-            Feature extractor for image pre-processing before being encoded.
-        image_encoder ([`CLIPVisionModelWithProjection`]):
-            CLIP vision model for encoding images.
-        image_normalizer ([`StableUnCLIPImageNormalizer`]):
-            Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image
-            embeddings after the noise has been applied.
-        image_noising_scheduler ([`KarrasDiffusionSchedulers`]):
-            Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined
-            by the `noise_level`.
-        tokenizer (`~transformers.CLIPTokenizer`):
-            A [`~transformers.CLIPTokenizer`].
-        text_encoder ([`~transformers.CLIPTextModel`]):
-            Frozen [`~transformers.CLIPTextModel`] text-encoder.
-        unet ([`UNet2DConditionModel`]):
-            A [`UNet2DConditionModel`] to denoise the encoded image latents.
-        scheduler ([`KarrasDiffusionSchedulers`]):
-            A scheduler to be used in combination with `unet` to denoise the encoded image latents.
-        vae ([`AutoencoderKL`]):
-            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-    """
-
-    _exclude_from_cpu_offload = ["image_normalizer"]
-
-    # image encoding components
-    feature_extractor: CLIPImageProcessor
-    image_encoder: CLIPVisionModelWithProjection
-
-    # image noising components
-    image_normalizer: StableUnCLIPImageNormalizer
-    image_noising_scheduler: KarrasDiffusionSchedulers
-
-    # regular denoising components
-    tokenizer: CLIPTokenizer
-    text_encoder: CLIPTextModel
-    unet: UNet2DConditionModel
-    scheduler: KarrasDiffusionSchedulers
-
-    vae: AutoencoderKL
-
-    def __init__(
-        self,
-        # image encoding components
-        feature_extractor: CLIPImageProcessor,
-        image_encoder: CLIPVisionModelWithProjection,
-        # image noising components
-        image_normalizer: StableUnCLIPImageNormalizer,
-        image_noising_scheduler: KarrasDiffusionSchedulers,
-        # regular denoising components
-        tokenizer: CLIPTokenizer,
-        text_encoder: CLIPTextModel,
-        unet: UNet2DConditionModel,
-        scheduler: KarrasDiffusionSchedulers,
-        # vae
-        vae: AutoencoderKL,
-    ):
-        super().__init__()
-
-        self.register_modules(
-            feature_extractor=feature_extractor,
-            image_encoder=image_encoder,
-            image_normalizer=image_normalizer,
-            image_noising_scheduler=image_noising_scheduler,
-            tokenizer=tokenizer,
-            text_encoder=text_encoder,
-            unet=unet,
-            scheduler=scheduler,
-            vae=vae,
-        )
-
-        self.vae_scale_factor = 2**(len(self.vae.config.block_out_channels) - 1)
-        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
-    def enable_vae_slicing(self):
-        r"""
-        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
-        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
-        """
-        self.vae.enable_slicing()
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
-    def disable_vae_slicing(self):
-        r"""
-        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
-        computing decoding in one step.
-        """
-        self.vae.disable_slicing()
-
-    def enable_model_cpu_offload(self, gpu_id=0):
-        r"""
-        Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
-        time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
-        Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
-        iterative execution of the `unet`.
-        """
-        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
-            from accelerate import cpu_offload_with_hook
-        else:
-            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
-        device = torch.device(f"cuda:{gpu_id}")
-
-        if self.device.type != "cpu":
-            self.to("cpu", silence_dtype_warnings=True)
-            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
-
-        hook = None
-        for cpu_offloaded_model in [self.text_encoder, self.image_encoder, self.unet, self.vae]:
-            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
-        # We'll offload the last model manually.
-        self.final_offload_hook = hook
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
-    def _encode_prompt(
-        self,
-        prompt,
-        device,
-        num_images_per_prompt,
-        do_classifier_free_guidance,
-        negative_prompt=None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        lora_scale: Optional[float] = None,
-    ):
-        r"""
-        Encodes the prompt into text encoder hidden states.
-
-        Args:
-            prompt (`str` or `List[str]`, *optional*):
-                prompt to be encoded
-            device: (`torch.device`):
-                torch device
-            num_images_per_prompt (`int`):
-                number of images that should be generated per prompt
-            do_classifier_free_guidance (`bool`):
-                whether to use classifier free guidance or not
-            negative_prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
-                less than `1`).
-            prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
-                provided, text embeddings will be generated from `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
-                argument.
-            lora_scale (`float`, *optional*):
-                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
-        """
-        # set lora scale so that monkey patched LoRA
-        # function of text encoder can correctly access it
-        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
-            self._lora_scale = lora_scale
-
-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
-            batch_size = len(prompt)
-        else:
-            batch_size = prompt_embeds.shape[0]
-
-        if prompt_embeds is None:
-            # textual inversion: process multi-vector tokens if necessary
-            if isinstance(self, TextualInversionLoaderMixin):
-                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
-
-            text_inputs = self.tokenizer(
-                prompt,
-                padding="max_length",
-                max_length=self.tokenizer.model_max_length,
-                truncation=True,
-                return_tensors="pt",
-            )
-            text_input_ids = text_inputs.input_ids
-            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
-            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
-                removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1])
-                logger.warning("The following part of your input was truncated because CLIP can only handle sequences up to"
-                               f" {self.tokenizer.model_max_length} tokens: {removed_text}")
-
-            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                attention_mask = text_inputs.attention_mask.to(device)
-            else:
-                attention_mask = None
-
-            prompt_embeds = self.text_encoder(
-                text_input_ids.to(device),
-                attention_mask=attention_mask,
-            )
-            prompt_embeds = prompt_embeds[0]
-
-        if self.text_encoder is not None:
-            prompt_embeds_dtype = self.text_encoder.dtype
-        elif self.unet is not None:
-            prompt_embeds_dtype = self.unet.dtype
-        else:
-            prompt_embeds_dtype = prompt_embeds.dtype
-
-        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
-
-        bs_embed, seq_len, _ = prompt_embeds.shape
-        # duplicate text embeddings for each generation per prompt, using mps friendly method
-        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
-        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-        # get unconditional embeddings for classifier free guidance
-        if do_classifier_free_guidance and negative_prompt_embeds is None:
-            uncond_tokens: List[str]
-            if negative_prompt is None:
-                uncond_tokens = [""] * batch_size
-            elif prompt is not None and type(prompt) is not type(negative_prompt):
-                raise TypeError(f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
-                                f" {type(prompt)}.")
-            elif isinstance(negative_prompt, str):
-                uncond_tokens = [negative_prompt]
-            elif batch_size != len(negative_prompt):
-                raise ValueError(
-                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                    " the batch size of `prompt`.")
-            else:
-                uncond_tokens = negative_prompt
-
-            # textual inversion: process multi-vector tokens if necessary
-            if isinstance(self, TextualInversionLoaderMixin):
-                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
-
-            max_length = prompt_embeds.shape[1]
-            uncond_input = self.tokenizer(
-                uncond_tokens,
-                padding="max_length",
-                max_length=max_length,
-                truncation=True,
-                return_tensors="pt",
-            )
-
-            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                attention_mask = uncond_input.attention_mask.to(device)
-            else:
-                attention_mask = None
-
-            negative_prompt_embeds = self.text_encoder(
-                uncond_input.input_ids.to(device),
-                attention_mask=attention_mask,
-            )
-            negative_prompt_embeds = negative_prompt_embeds[0]
-
-        if do_classifier_free_guidance:
-            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-            seq_len = negative_prompt_embeds.shape[1]
-
-            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
-
-            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
-            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
-            # For classifier free guidance, we need to do two forward passes.
-            # Here we concatenate the unconditional and text embeddings into a single batch
-            # to avoid doing two forward passes
-            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-
-        return prompt_embeds
-
-    def _encode_image(
-        self,
-        image,
-        device,
-        batch_size,
-        num_images_per_prompt,
-        do_classifier_free_guidance,
-        noise_level,
-        generator,
-        image_embeds,
-        negative_image_embeds,
-    ):
-        dtype = next(self.image_encoder.parameters()).dtype
-
-        if isinstance(image, PIL.Image.Image):
-            # the image embedding should be repeated so it matches the total batch size of the prompt
-            repeat_by = batch_size
-        else:
-            # assume the image input is already properly batched and just needs to be repeated so
-            # it matches the num_images_per_prompt.
-            #
-            # NOTE(will) this is probably missing a few number of side cases. I.e. batched/non-batched
-            # `image_embeds`. If those happen to be common use cases, let's think harder about
-            # what the expected dimensions of inputs should be and how we handle the encoding.
-            repeat_by = num_images_per_prompt
-
-        if image_embeds is None:
-            if not isinstance(image, torch.Tensor):
-                image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
-
-            image = image.to(device=device, dtype=dtype)
-            image_embeds = self.image_encoder(image).image_embeds
-
-            image_embeds = self.noise_image_embeddings(
-                image_embeds=image_embeds,
-                noise_level=noise_level,
-                generator=generator,
-            )
-
-        # duplicate image embeddings for each generation per prompt, using mps friendly method
-        image_embeds = image_embeds.unsqueeze(1)
-        bs_embed, seq_len, _ = image_embeds.shape
-        image_embeds = image_embeds.repeat(1, repeat_by, 1)
-        image_embeds = image_embeds.view(bs_embed * repeat_by, seq_len, -1)
-        image_embeds = image_embeds.squeeze(1)
-
-        if negative_image_embeds is not None:
-            negative_image_embeds = self.noise_image_embeddings(
-                image_embeds=negative_image_embeds,
-                noise_level=0,
-                generator=generator,
-            )
-            # duplicate negative image embeddings for each generation per prompt, using mps friendly method
-            negative_image_embeds = negative_image_embeds.unsqueeze(1)
-            bs_embed, seq_len, _ = negative_image_embeds.shape
-            negative_image_embeds = negative_image_embeds.repeat(1, repeat_by, 1)
-            negative_image_embeds = negative_image_embeds.view(bs_embed * repeat_by, seq_len, -1)
-            negative_image_embeds = negative_image_embeds.squeeze(1)
-
-        if do_classifier_free_guidance:
-            if negative_image_embeds is None:
-                negative_image_embeds = torch.zeros_like(image_embeds)
-
-            # For classifier free guidance, we need to do two forward passes.
-            # Here we concatenate the unconditional and text embeddings into a single batch
-            # to avoid doing two forward passes
-            image_embeds = torch.cat([negative_image_embeds, image_embeds])
-
-        return image_embeds
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
-    def decode_latents(self, latents):
-        warnings.warn(
-            "The decode_latents method is deprecated and will be removed in a future version. Please"
-            " use VaeImageProcessor instead",
-            FutureWarning,
-        )
-        latents = 1 / self.vae.config.scaling_factor * latents
-        image = self.vae.decode(latents, return_dict=False)[0]
-        image = (image / 2 + 0.5).clamp(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-        return image
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
-    def prepare_extra_step_kwargs(self, generator, eta):
-        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-        # and should be between [0, 1]
-
-        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-        extra_step_kwargs = {}
-        if accepts_eta:
-            extra_step_kwargs["eta"] = eta
-
-        # check if the scheduler accepts generator
-        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
-        if accepts_generator:
-            extra_step_kwargs["generator"] = generator
-        return extra_step_kwargs
-
-    def check_inputs(
-        self,
-        prompt,
-        image,
-        height,
-        width,
-        callback_steps,
-        noise_level,
-        negative_prompt=None,
-        prompt_embeds=None,
-        negative_prompt_embeds=None,
-        image_embeds=None,
-    ):
-        if height % 8 != 0 or width % 8 != 0:
-            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-        if (callback_steps is None) or (callback_steps is not None and
-                                        (not isinstance(callback_steps, int) or callback_steps <= 0)):
-            raise ValueError(f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
-                             f" {type(callback_steps)}.")
-
-        if prompt is not None and prompt_embeds is not None:
-            raise ValueError("Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two.")
-
-        if prompt is None and prompt_embeds is None:
-            raise ValueError(
-                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.")
-
-        if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
-            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-        if negative_prompt is not None and negative_prompt_embeds is not None:
-            raise ValueError(
-                "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined."
-            )
-
-        if prompt is not None and negative_prompt is not None:
-            if type(prompt) is not type(negative_prompt):
-                raise TypeError(f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
-                                f" {type(prompt)}.")
-
-        if prompt_embeds is not None and negative_prompt_embeds is not None:
-            if prompt_embeds.shape != negative_prompt_embeds.shape:
-                raise ValueError(
-                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
-                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
-                    f" {negative_prompt_embeds.shape}.")
-
-        if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps:
-            raise ValueError(
-                f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive."
-            )
-
-        if image is not None and image_embeds is not None:
-            raise ValueError("Provide either `image` or `image_embeds`. Please make sure to define only one of the two.")
-
-        if image is None and image_embeds is None:
-            raise ValueError(
-                "Provide either `image` or `image_embeds`. Cannot leave both `image` and `image_embeds` undefined.")
-
-        if image is not None:
-            if (not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list)):
-                raise ValueError(
-                    "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
-                    f" {type(image)}")
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
-    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
-        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
-        if isinstance(generator, list) and len(generator) != batch_size:
-            raise ValueError(
-                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
-
-        if latents is None:
-            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-        else:
-            latents = latents.to(device)
-
-        # scale the initial noise by the standard deviation required by the scheduler
-        latents = latents * self.scheduler.init_noise_sigma
-        return latents
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_unclip.StableUnCLIPPipeline.noise_image_embeddings
-    def noise_image_embeddings(
-        self,
-        image_embeds: torch.Tensor,
-        noise_level: int,
-        noise: Optional[torch.FloatTensor] = None,
-        generator: Optional[torch.Generator] = None,
-    ):
-        """
-        Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher
-        `noise_level` increases the variance in the final un-noised images.
-
-        The noise is applied in two ways:
-        1. A noise schedule is applied directly to the embeddings.
-        2. A vector of sinusoidal time embeddings are appended to the output.
-
-        In both cases, the amount of noise is controlled by the same `noise_level`.
-
-        The embeddings are normalized before the noise is applied and un-normalized after the noise is applied.
-        """
-        if noise is None:
-            noise = randn_tensor(image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype)
-
-        noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device)
-
-        self.image_normalizer.to(image_embeds.device)
-        image_embeds = self.image_normalizer.scale(image_embeds)
-
-        image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise)
-
-        image_embeds = self.image_normalizer.unscale(image_embeds)
-
-        noise_level = get_timestep_embedding(timesteps=noise_level,
-                                             embedding_dim=image_embeds.shape[-1],
-                                             flip_sin_to_cos=True,
-                                             downscale_freq_shift=0)
-
-        # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors,
-        # but we might actually be running in fp16. so we need to cast here.
-        # there might be better ways to encapsulate this.
-        noise_level = noise_level.to(image_embeds.dtype)
-
-        image_embeds = torch.cat((image_embeds, noise_level), 1)
-
-        return image_embeds
-
-    @torch.no_grad()
-    @replace_example_docstring(EXAMPLE_DOC_STRING)
-    def __call__(
-        self,
-        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
-        prompt: Union[str, List[str]] = None,
-        height: Optional[int] = None,
-        width: Optional[int] = None,
-        num_inference_steps: int = 20,
-        guidance_scale: float = 10,
-        negative_prompt: Optional[Union[str, List[str]]] = None,
-        num_images_per_prompt: Optional[int] = 1,
-        eta: float = 0.0,
-        generator: Optional[torch.Generator] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-        callback_steps: int = 1,
-        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-        noise_level: int = 0,
-        image_embeds: Optional[torch.FloatTensor] = None,
-        negative_image_embeds: Optional[torch.FloatTensor] = None,
-    ):
-        r"""
-        The call function to the pipeline for generation.
-
-        Args:
-            prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts to guide the image generation. If not defined, either `prompt_embeds` will be
-                used or prompt is initialized to `""`.
-            image (`torch.FloatTensor` or `PIL.Image.Image`):
-                `Image` or tensor representing an image batch. The image is encoded to its CLIP embedding which the
-                `unet` is conditioned on. The image is _not_ encoded by the `vae` and then used as the latents in the
-                denoising process like it is in the standard Stable Diffusion text-guided image variation process.
-            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
-                The height in pixels of the generated image.
-            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
-                The width in pixels of the generated image.
-            num_inference_steps (`int`, *optional*, defaults to 20):
-                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                expense of slower inference.
-            guidance_scale (`float`, *optional*, defaults to 10.0):
-                A higher guidance scale value encourages the model to generate images closely linked to the text
-                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
-            negative_prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
-                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
-            num_images_per_prompt (`int`, *optional*, defaults to 1):
-                The number of images to generate per prompt.
-            eta (`float`, *optional*, defaults to 0.0):
-                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
-                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
-            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
-                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
-                generation deterministic.
-            latents (`torch.FloatTensor`, *optional*):
-                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
-                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor is generated by sampling using the supplied random `generator`.
-            prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
-                provided, text embeddings are generated from the `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
-                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
-            output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
-            return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
-            callback (`Callable`, *optional*):
-                A function that calls every `callback_steps` steps during inference. The function is called with the
-                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-            callback_steps (`int`, *optional*, defaults to 1):
-                The frequency at which the `callback` function is called. If not specified, the callback is called at
-                every step.
-            cross_attention_kwargs (`dict`, *optional*):
-                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
-                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
-            noise_level (`int`, *optional*, defaults to `0`):
-                The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in
-                the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details.
-            image_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated CLIP embeddings to condition the `unet` on. These latents are not used in the denoising
-                process. If you want to provide pre-generated latents, pass them to `__call__` as `latents`.
-
-        Examples:
-
-        Returns:
-            [`~pipelines.ImagePipelineOutput`] or `tuple`:
-                [`~pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning
-                a tuple, the first element is a list with the generated images.
-        """
-        # 0. Default height and width to unet
-        height = height or self.unet.config.sample_size * self.vae_scale_factor
-        width = width or self.unet.config.sample_size * self.vae_scale_factor
-
-        if prompt is None and prompt_embeds is None:
-            prompt = len(image) * [""] if isinstance(image, list) else ""
-
-        # 1. Check inputs. Raise error if not correct
-        self.check_inputs(
-            prompt=prompt,
-            image=image,
-            height=height,
-            width=width,
-            callback_steps=callback_steps,
-            noise_level=noise_level,
-            negative_prompt=negative_prompt,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            image_embeds=image_embeds,
-        )
-
-        # 2. Define call parameters
-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
-            batch_size = len(prompt)
-        else:
-            batch_size = prompt_embeds.shape[0]
-
-        batch_size = batch_size * num_images_per_prompt
-
-        device = self._execution_device
-
-        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-        # corresponds to doing no classifier free guidance.
-        do_classifier_free_guidance = guidance_scale > 1.0
-
-        # 3. Encode input prompt
-        text_encoder_lora_scale = (cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None)
-        prompt_embeds = self._encode_prompt(
-            prompt=prompt,
-            device=device,
-            num_images_per_prompt=num_images_per_prompt,
-            do_classifier_free_guidance=do_classifier_free_guidance,
-            negative_prompt=negative_prompt,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            lora_scale=text_encoder_lora_scale,
-        )
-
-        # 4. Encode input image
-        noise_level = torch.tensor([noise_level], device=device)
-        image_embeds = self._encode_image(
-            image=image,
-            device=device,
-            batch_size=batch_size,
-            num_images_per_prompt=num_images_per_prompt,
-            do_classifier_free_guidance=do_classifier_free_guidance,
-            noise_level=noise_level,
-            generator=generator,
-            image_embeds=image_embeds,
-            negative_image_embeds=negative_image_embeds,
-        )
-
-        # 5. Prepare timesteps
-        self.scheduler.set_timesteps(num_inference_steps, device=device)
-        timesteps = self.scheduler.timesteps
-
-        # 6. Prepare latent variables
-        num_channels_latents = self.unet.config.in_channels
-        latents = self.prepare_latents(
-            batch_size=batch_size,
-            num_channels_latents=num_channels_latents,
-            height=height,
-            width=width,
-            dtype=prompt_embeds.dtype,
-            device=device,
-            generator=generator,
-            latents=latents,
-        )
-
-        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
-        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
-        # 8. Denoising loop
-        for i, t in enumerate(self.progress_bar(timesteps)):
-            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-            # predict the noise residual
-            noise_pred = self.unet(
-                latent_model_input,
-                t,
-                encoder_hidden_states=prompt_embeds,
-                class_labels=image_embeds,
-                cross_attention_kwargs=cross_attention_kwargs,
-                return_dict=False,
-            )[0]
-
-            # perform guidance
-            if do_classifier_free_guidance:
-                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-            # compute the previous noisy sample x_t -> x_t-1
-            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
-
-            if callback is not None and i % callback_steps == 0:
-                callback(i, t, latents)
-
-        # 9. Post-processing
-        if not output_type == "latent":
-            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
-        else:
-            image = latents
-
-        image = self.image_processor.postprocess(image, output_type=output_type)
-
-        # Offload last model to CPU
-        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
-            self.final_offload_hook.offload()
-
-        if not return_dict:
-            return (image, )
-
-        return ImagePipelineOutput(images=image)
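The deleted file is essentially diffusers' `StableUnCLIPImg2ImgPipeline` vendored into the SEED-LLaMA repo, with `negative_image_embeds` support added to `_encode_image`. A hedged usage sketch; the checkpoint name and file paths below are assumptions, not something this commit pins down:

```python
import torch
from PIL import Image
from models.pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline

# Assumed checkpoint: any repo with the component layout this pipeline
# registers (CLIP image encoder, image normalizer/noising scheduler, unet, vae).
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("input.jpg").convert("RGB")  # placeholder path

# noise_level controls how much noise is mixed into the CLIP image embedding:
# 0 keeps the output close to the input image; larger values (strictly below
# the image-noising scheduler's num_train_timesteps) drift further from it.
images = pipe(image=init_image, prompt="a fantasy landscape", noise_level=50).images
images[0].save("variation.png")
```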
spaces/ASJMO/freegpt/server/bp.py
DELETED
@@ -1,6 +0,0 @@
-from flask import Blueprint
-
-bp = Blueprint('bp', __name__,
-               template_folder='./../client/html',
-               static_folder='./../client',
-               static_url_path='assets')
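A Flask `Blueprint` like the one above does nothing until it is registered on an application. A minimal sketch, assuming a hypothetical entry point (the real freegpt app wiring is not part of this hunk):

```python
from flask import Flask

from server.bp import bp  # the blueprint defined above

app = Flask(__name__)
app.register_blueprint(bp)  # routes attached to `bp` become live here

if __name__ == "__main__":
    app.run(port=1338)  # arbitrary port chosen for the sketch
```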
spaces/AUBADA-ALARABI/poetry2023/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Poetry2023
-emoji: 👁
-colorFrom: green
-colorTo: gray
-sdk: gradio
-sdk_version: 3.16.0
-app_file: app.py
-pinned: false
-duplicated_from: Abdllh/poetry2023
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Abubakari/Sepsis-fastapi-prediction-app/main.py
DELETED
@@ -1,85 +0,0 @@
-import pandas as pd
-import joblib
-from fastapi import FastAPI
-import uvicorn
-import numpy as np
-import os
-
-app = FastAPI()
-
-def load_model():
-    num_imputer_filepath = "numerical_imputer.joblib"
-    scaler_filepath = "scaler.joblib"
-    model_filepath = "lr_model.joblib"
-
-    num_imputer = joblib.load(num_imputer_filepath)
-    scaler = joblib.load(scaler_filepath)
-    model = joblib.load(model_filepath)
-
-    return num_imputer, scaler, model
-
-def preprocess_input_data(input_data, num_imputer, scaler):
-    input_data_df = pd.DataFrame([input_data], index=[0])  # Add index [0] to the DataFrame
-    input_data_scaled = scaler.transform(input_data_df)
-    input_data_scaled = pd.DataFrame(input_data_scaled, columns=input_data_df.columns)
-    input_data_imputed = num_imputer.transform(input_data_scaled)
-
-    return input_data_imputed
-
-
-@app.get("/")
-def read_root():
-
-    info = """
-    Welcome to the Sepsis Prediction API! 🩺💉. This API provides advanced machine learning predictions for sepsis. ⚡📊 For more information and to explore the API's capabilities, please visit the documentation: https://abubakari-sepsis-fastapi-prediction-app.hf.space/docs/
-    """
-    return info.strip()


-@app.get("/sepsis/predict")
-def predict_sepsis_endpoint(PRG: float, PL: float, PR: float, SK: float, TS: float,
-                            M11: float, BD2: float, Age: float, Insurance: int):
-    num_imputer, scaler, model = load_model()
-
-    input_data = {
-        'PRG': PRG,
-        'PL': PL,
-        'PR': PR,
-        'SK': SK,
-        'TS': TS,
-        'M11': M11,
-        'BD2': BD2,
-        'Age': Age,
-        'Insurance': Insurance
-    }
-
-    input_scaled_df = preprocess_input_data(input_data, num_imputer, scaler)
-
-    probabilities = model.predict_proba(input_scaled_df)[0]
-    prediction = np.argmax(probabilities)
-
-    sepsis_status = "Positive" if prediction == 1 else "Negative"
-
-    probability = probabilities[1] if prediction == 1 else probabilities[0]
-
-    #statement = f"The patient is {sepsis_status}. There is a {'high' if prediction == 1 else 'low'} probability ({probability:.2f}) that the patient is susceptible to developing sepsis."
-
-    if prediction == 1:
-        status_icon = "✔"  # check-mark icon shown for a positive sepsis prediction
-        sepsis_explanation = "Sepsis is a life-threatening condition caused by an infection. A positive prediction suggests that the patient might be exhibiting sepsis symptoms and requires immediate medical attention."
-    else:
-        status_icon = "✘"  # cross icon shown for a negative sepsis prediction
-        sepsis_explanation = "Sepsis is a life-threatening condition caused by an infection. A negative prediction suggests that the patient is not currently exhibiting sepsis symptoms."
-
-    statement = f"The patient's sepsis status is {sepsis_status} {status_icon} with a probability of {probability:.2f}. {sepsis_explanation}"
-
-    user_input_statement = "Please note this is the user-inputted data: "
-
-    output_df = pd.DataFrame([input_data])
-
-    result = {'predicted_sepsis': sepsis_status, 'statement': statement, 'user_input_statement': user_input_statement, 'input_data_df': output_df.to_dict('records')}
-
-    return result
-
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=7860, reload=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/AchyuthGamer-OpenGPT/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/TheBloke/Mistral-7B-Instruct-v0.1-GPTQ").launch()

spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/+page.server.ts
DELETED
@@ -1,13 +0,0 @@
-import { collections } from "$lib/server/database";
-import { error } from "@sveltejs/kit";
-import { authCondition } from "$lib/server/auth";
-import type { WebSearchMessageResult } from "$lib/types/WebSearch";
-import { UrlDependency } from "$lib/types/UrlDependency";
-
-export const load = async ({ params, depends, locals }) => {
-	return {
-		title: "Untitled",
-		model: "",
-		searches: undefined,
-	};
-};

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/grayscalepipeline-plugin.d.ts
DELETED
@@ -1,29 +0,0 @@
-// import * as Phaser from 'phaser';
-import GrayScalePostFxPipeline from './grayscalepipeline';
-
-export default GrayScalePipelinePlugin;
-
-declare namespace GrayScalePipelinePlugin {
-
-    interface IConfig extends GrayScalePostFxPipeline.IConfig {
-        name?: string,
-    }
-
-}
-
-declare class GrayScalePipelinePlugin extends Phaser.Plugins.BasePlugin {
-    add(
-        gameObject: Phaser.GameObjects.GameObject,
-        config?: GrayScalePipelinePlugin.IConfig
-    ): GrayScalePostFxPipeline;
-
-    remove(
-        gameObject: Phaser.GameObjects.GameObject,
-        name?: string
-    ): this;
-
-    get(
-        gameObject: Phaser.GameObjects.GameObject,
-        name?: string
-    ): GrayScalePostFxPipeline | GrayScalePostFxPipeline[];
-}

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/label/Factory.d.ts
DELETED
@@ -1,5 +0,0 @@
-import Label from './Label';
-
-export default function (
-    config?: Label.IConfig
-): Label;

spaces/AliUsama98/Aliusama_spellchecker/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/oliverguhr/spelling-correction-english-base").launch()

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/score_sde_vp.md
DELETED
@@ -1,26 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# Variance Preserving Stochastic Differential Equation (VP-SDE) scheduler
-
-## Overview
-
-Original paper can be found [here](https://arxiv.org/abs/2011.13456).
-
-<Tip warning={true}>
-
-Score SDE-VP is under construction.
-
-</Tip>
-
-## ScoreSdeVpScheduler
-[[autodoc]] schedulers.scheduling_sde_vp.ScoreSdeVpScheduler

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/score_sde_ve/__init__.py
DELETED
File without changes

spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py
DELETED
@@ -1,5 +0,0 @@
-_base_ = [
-    '../_base_/models/faster_rcnn_r50_fpn.py',
-    '../_base_/datasets/coco_detection.py',
-    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]

spaces/Andy1621/uniformer_image_detection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py'
-model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))

spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/match_costs/match_cost.py
DELETED
@@ -1,184 +0,0 @@
-import torch
-
-from mmdet.core.bbox.iou_calculators import bbox_overlaps
-from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
-from .builder import MATCH_COST
-
-
-@MATCH_COST.register_module()
-class BBoxL1Cost(object):
-    """BBoxL1Cost.
-
-    Args:
-        weight (int | float, optional): loss_weight
-        box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN
-
-    Examples:
-        >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost
-        >>> import torch
-        >>> self = BBoxL1Cost()
-        >>> bbox_pred = torch.rand(1, 4)
-        >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
-        >>> factor = torch.tensor([10, 8, 10, 8])
-        >>> self(bbox_pred, gt_bboxes, factor)
-        tensor([[1.6172, 1.6422]])
-    """
-
-    def __init__(self, weight=1., box_format='xyxy'):
-        self.weight = weight
-        assert box_format in ['xyxy', 'xywh']
-        self.box_format = box_format
-
-    def __call__(self, bbox_pred, gt_bboxes):
-        """
-        Args:
-            bbox_pred (Tensor): Predicted boxes with normalized coordinates
-                (cx, cy, w, h), which are all in range [0, 1]. Shape
-                [num_query, 4].
-            gt_bboxes (Tensor): Ground truth boxes with normalized
-                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
-
-        Returns:
-            torch.Tensor: bbox_cost value with weight
-        """
-        if self.box_format == 'xywh':
-            gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)
-        elif self.box_format == 'xyxy':
-            bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
-        bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
-        return bbox_cost * self.weight
-
-
-@MATCH_COST.register_module()
-class FocalLossCost(object):
-    """FocalLossCost.
-
-    Args:
-        weight (int | float, optional): loss_weight
-        alpha (int | float, optional): focal_loss alpha
-        gamma (int | float, optional): focal_loss gamma
-        eps (float, optional): default 1e-12
-
-    Examples:
-        >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost
-        >>> import torch
-        >>> self = FocalLossCost()
-        >>> cls_pred = torch.rand(4, 3)
-        >>> gt_labels = torch.tensor([0, 1, 2])
-        >>> factor = torch.tensor([10, 8, 10, 8])
-        >>> self(cls_pred, gt_labels)
-        tensor([[-0.3236, -0.3364, -0.2699],
-                [-0.3439, -0.3209, -0.4807],
-                [-0.4099, -0.3795, -0.2929],
-                [-0.1950, -0.1207, -0.2626]])
-    """
-
-    def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12):
-        self.weight = weight
-        self.alpha = alpha
-        self.gamma = gamma
-        self.eps = eps
-
-    def __call__(self, cls_pred, gt_labels):
-        """
-        Args:
-            cls_pred (Tensor): Predicted classification logits, shape
-                [num_query, num_class].
-            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
-
-        Returns:
-            torch.Tensor: cls_cost value with weight
-        """
-        cls_pred = cls_pred.sigmoid()
-        neg_cost = -(1 - cls_pred + self.eps).log() * (
-            1 - self.alpha) * cls_pred.pow(self.gamma)
-        pos_cost = -(cls_pred + self.eps).log() * self.alpha * (
-            1 - cls_pred).pow(self.gamma)
-        cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]
-        return cls_cost * self.weight
-
-
-@MATCH_COST.register_module()
-class ClassificationCost(object):
-    """ClsSoftmaxCost.
-
-    Args:
-        weight (int | float, optional): loss_weight
-
-    Examples:
-        >>> from mmdet.core.bbox.match_costs.match_cost import \
-        ... ClassificationCost
-        >>> import torch
-        >>> self = ClassificationCost()
-        >>> cls_pred = torch.rand(4, 3)
-        >>> gt_labels = torch.tensor([0, 1, 2])
-        >>> factor = torch.tensor([10, 8, 10, 8])
-        >>> self(cls_pred, gt_labels)
-        tensor([[-0.3430, -0.3525, -0.3045],
-                [-0.3077, -0.2931, -0.3992],
-                [-0.3664, -0.3455, -0.2881],
-                [-0.3343, -0.2701, -0.3956]])
-    """
-
-    def __init__(self, weight=1.):
-        self.weight = weight
-
-    def __call__(self, cls_pred, gt_labels):
-        """
-        Args:
-            cls_pred (Tensor): Predicted classification logits, shape
-                [num_query, num_class].
-            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
-
-        Returns:
-            torch.Tensor: cls_cost value with weight
-        """
-        # Following the official DETR repo, contrary to the loss that
-        # NLL is used, we approximate it in 1 - cls_score[gt_label].
-        # The 1 is a constant that doesn't change the matching,
-        # so it can be omitted.
-        cls_score = cls_pred.softmax(-1)
-        cls_cost = -cls_score[:, gt_labels]
-        return cls_cost * self.weight
-
-
-@MATCH_COST.register_module()
-class IoUCost(object):
-    """IoUCost.
-
-    Args:
-        iou_mode (str, optional): iou mode such as 'iou' | 'giou'
-        weight (int | float, optional): loss weight
-
-    Examples:
-        >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost
-        >>> import torch
-        >>> self = IoUCost()
-        >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]])
-        >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
-        >>> self(bboxes, gt_bboxes)
-        tensor([[-0.1250, 0.1667],
-                [ 0.1667, -0.5000]])
-    """
-
-    def __init__(self, iou_mode='giou', weight=1.):
-        self.weight = weight
-        self.iou_mode = iou_mode
-
-    def __call__(self, bboxes, gt_bboxes):
-        """
-        Args:
-            bboxes (Tensor): Predicted boxes with unnormalized coordinates
-                (x1, y1, x2, y2). Shape [num_query, 4].
-            gt_bboxes (Tensor): Ground truth boxes with unnormalized
-                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
-
-        Returns:
-            torch.Tensor: iou_cost value with weight
-        """
-        # overlaps: [num_bboxes, num_gt]
-        overlaps = bbox_overlaps(
-            bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
-        # The 1 is a constant that doesn't change the matching, so omitted.
-        iou_cost = -overlaps
-        return iou_cost * self.weight

spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/bfp.py
DELETED
@@ -1,104 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule, xavier_init
-from mmcv.cnn.bricks import NonLocal2d
-
-from ..builder import NECKS
-
-
-@NECKS.register_module()
-class BFP(nn.Module):
-    """BFP (Balanced Feature Pyramids)
-
-    BFP takes multi-level features as inputs and gather them into a single one,
-    then refine the gathered feature and scatter the refined results to
-    multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
-    the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
-    <https://arxiv.org/abs/1904.02701>`_ for details.
-
-    Args:
-        in_channels (int): Number of input channels (feature maps of all levels
-            should have the same channels).
-        num_levels (int): Number of input feature levels.
-        conv_cfg (dict): The config dict for convolution layers.
-        norm_cfg (dict): The config dict for normalization layers.
-        refine_level (int): Index of integration and refine level of BSF in
-            multi-level features from bottom to top.
-        refine_type (str): Type of the refine op, currently support
-            [None, 'conv', 'non_local'].
-    """
-
-    def __init__(self,
-                 in_channels,
-                 num_levels,
-                 refine_level=2,
-                 refine_type=None,
-                 conv_cfg=None,
-                 norm_cfg=None):
-        super(BFP, self).__init__()
-        assert refine_type in [None, 'conv', 'non_local']
-
-        self.in_channels = in_channels
-        self.num_levels = num_levels
-        self.conv_cfg = conv_cfg
-        self.norm_cfg = norm_cfg
-
-        self.refine_level = refine_level
-        self.refine_type = refine_type
-        assert 0 <= self.refine_level < self.num_levels
-
-        if self.refine_type == 'conv':
-            self.refine = ConvModule(
-                self.in_channels,
-                self.in_channels,
-                3,
-                padding=1,
-                conv_cfg=self.conv_cfg,
-                norm_cfg=self.norm_cfg)
-        elif self.refine_type == 'non_local':
-            self.refine = NonLocal2d(
-                self.in_channels,
-                reduction=1,
-                use_scale=False,
-                conv_cfg=self.conv_cfg,
-                norm_cfg=self.norm_cfg)
-
-    def init_weights(self):
-        """Initialize the weights of FPN module."""
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                xavier_init(m, distribution='uniform')
-
-    def forward(self, inputs):
-        """Forward function."""
-        assert len(inputs) == self.num_levels
-
-        # step 1: gather multi-level features by resize and average
-        feats = []
-        gather_size = inputs[self.refine_level].size()[2:]
-        for i in range(self.num_levels):
-            if i < self.refine_level:
-                gathered = F.adaptive_max_pool2d(
-                    inputs[i], output_size=gather_size)
-            else:
-                gathered = F.interpolate(
-                    inputs[i], size=gather_size, mode='nearest')
-            feats.append(gathered)
-
-        bsf = sum(feats) / len(feats)
-
-        # step 2: refine gathered features
-        if self.refine_type is not None:
-            bsf = self.refine(bsf)
-
-        # step 3: scatter refined features to multi-levels by a residual path
-        outs = []
-        for i in range(self.num_levels):
-            out_size = inputs[i].size()[2:]
-            if i < self.refine_level:
-                residual = F.interpolate(bsf, size=out_size, mode='nearest')
-            else:
-                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
-            outs.append(residual + inputs[i])
-
-        return tuple(outs)

spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py
DELETED
@@ -1,10 +0,0 @@
-_base_ = './fcn_hr18_512x1024_40k_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w48',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(48, 96)),
-            stage3=dict(num_channels=(48, 96, 192)),
-            stage4=dict(num_channels=(48, 96, 192, 384)))),
-    decode_head=dict(
-        in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))

spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/c_model.py
DELETED
@@ -1,194 +0,0 @@
-import torch
-import torch.nn.functional as F
-from .base_model import BaseModel
-from . import networks, losses
-
-
-class C(BaseModel):
-    """This class implements the conv-based model for image completion"""
-    def name(self):
-        return "Conv-based Image Completion"
-
-    @staticmethod
-    def modify_options(parser, is_train=True):
-        """Add new options and rewrite default values for existing options"""
-        parser.add_argument('--coarse_or_refine', type=str, default='coarse', help='train the transform or refined network')
-        parser.add_argument('--down_layers', type=int, default=4, help='# times down sampling for refine generator')
-        if is_train:
-            parser.add_argument('--lambda_rec', type=float, default=10.0, help='weight for image reconstruction loss')
-            parser.add_argument('--lambda_g', type=float, default=1.0, help='weight for discriminator loss')
-            parser.add_argument('--lambda_lp', type=float, default=10.0, help='weight for the perceptual loss')
-            parser.add_argument('--lambda_gradient', type=float, default=0.0, help='weight for the gradient penalty')
-
-        return parser
-
-    def __init__(self, opt):
-        """Initialize the Transformer model"""
-        BaseModel.__init__(self, opt)
-        self.visual_names = ['img', 'img_m', 'img_g', 'img_out']
-        self.model_names = ['E', 'G', 'D',]
-        self.loss_names = ['G_rec', 'G_lp', 'G_GAN', 'D_real', 'D_fake']
-
-        self.netE = networks.define_E(opt)
-        self.netG = networks.define_G(opt)
-        self.netD = networks.define_D(opt, opt.fixed_size)
-
-        if 'refine' in self.opt.coarse_or_refine:
-            opt = self._refine_opt(opt)
-            self.netG_Ref = networks.define_G(opt)
-            self.netD_Ref = networks.define_D(opt, opt.fine_size)
-            self.visual_names += ['img_ref', 'img_ref_out']
-            self.model_names += ['G_Ref', 'D_Ref']
-
-        if self.isTrain:
-            # define the loss function
-            self.L1loss = torch.nn.L1Loss()
-            self.GANloss = losses.GANLoss(opt.gan_mode).to(self.device)
-            self.NormalVGG = losses.Normalization(self.device)
-            self.LPIPSloss = losses.LPIPSLoss(ckpt_path=opt.lipip_path).to(self.device)
-            if len(self.opt.gpu_ids) > 0:
-                self.LPIPSloss = torch.nn.parallel.DataParallel(self.LPIPSloss, self.opt.gpu_ids)
-            # define the optimizer
-            if 'coarse' in self.opt.coarse_or_refine:
-                self.optimizerG = torch.optim.Adam(list(self.netE.parameters()) + list(self.netG.parameters()),
-                                                   lr=opt.lr, betas=(opt.beta1, opt.beta2))
-                self.optimizerD = torch.optim.Adam(self.netD.parameters(), lr=opt.lr * 4, betas=(opt.beta1, opt.beta2))
-                self.optimizers.append(self.optimizerG)
-                self.optimizers.append(self.optimizerD)
-            if 'refine' in self.opt.coarse_or_refine:
-                self.optimizerGRef = torch.optim.Adam(self.netG_Ref.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
-                self.optimizerDRef = torch.optim.Adam(self.netD_Ref.parameters(), lr=opt.lr * 4, betas=(opt.beta1, opt.beta2))
-                self.optimizers.append(self.optimizerGRef)
-                self.optimizers.append(self.optimizerDRef)
-        else:
-            self.visual_names = ['img', 'img_m']
-
-    def set_input(self, input):
-        """Unpack input data from the data loader and perform necessary pre-process steps"""
-        self.input = input
-
-        self.image_paths = self.input['img_path']
-        self.img_org = input['img_org'].to(self.device) * 2 - 1
-        self.img = input['img'].to(self.device) * 2 - 1
-        self.mask = input['mask'].to(self.device)
-
-        # get I_m and I_c for image with mask and complement regions for training
-        self.img_m = self.mask * self.img_org
-
-    @torch.no_grad()
-    def test(self):
-        """Run forward processing for testing"""
-        fixed_img = F.interpolate(self.img_m, size=self.img.size()[2:], mode='bicubic', align_corners=True).clamp(-1, 1)
-        fixed_mask = (F.interpolate(self.mask, size=self.img.size()[2:], mode='bicubic', align_corners=True) > 0.9).type_as(fixed_img)
-        out, mask = self.netE(fixed_img, mask=fixed_mask, return_mask=True)
-
-        # sample result
-        for i in range(self.opt.nsampling):
-            img_g = self.netG(out)
-            img_g_org = F.interpolate(img_g, size=self.img_org.size()[2:], mode='bicubic', align_corners=True).clamp(-1, 1)
-            img_out = self.mask * self.img_org + (1 - self.mask) * img_g_org
-            self.save_results(img_out, path=self.opt.save_dir + '/img_out', data_name=i)
-            if 'refine' in self.opt.coarse_or_refine:
-                img_ref = self.netG_Ref(img_out, mask=self.mask)
-                img_ref_out = self.mask * self.img_org + (1 - self.mask) * img_ref
-                self.save_results(img_ref_out, path=self.opt.save_dir + '/img_ref_out', data_name=i)
-
-    def forward(self):
-        """Run forward processing to get the outputs"""
-        fixed_img = F.interpolate(self.img_m, size=self.img.size()[2:], mode='bicubic', align_corners=True).clamp(-1, 1)
-        self.fixed_mask = (F.interpolate(self.mask, size=self.img.size()[2:], mode='bicubic', align_corners=True) > 0.9).type_as(fixed_img)
-        out, mask = self.netE(fixed_img, mask=self.fixed_mask, return_mask=True)
-        self.img_g = self.netG(out)
-        img_g_org = F.interpolate(self.img_g, size=self.img_org.size()[2:], mode='bicubic', align_corners=True).clamp(-1, 1)
-        self.img_out = self.mask * self.img_org + (1 - self.mask) * img_g_org
-
-        if 'refine' in self.opt.coarse_or_refine:
-            self.img_ref = self.netG_Ref(self.img_out, self.mask)
-            self.img_ref_out = self.mask * self.img_org + (1 - self.mask) * self.img_ref
-
-    def backward_D_basic(self, netD, real, fake):
-        """
-        Calculate GAN loss for the discriminator
-        :param netD: the discriminator D
-        :param real: real examples
-        :param fake: examples generated by a generator
-        :return: discriminator loss
-        """
-        self.loss_D_real = self.GANloss(netD(real), True, is_dis=True)
-        self.loss_D_fake = self.GANloss(netD(fake), False, is_dis=True)
-        loss_D = self.loss_D_real + self.loss_D_fake
-        if self.opt.lambda_gradient > 0:
-            self.loss_D_Gradient, _ = losses.cal_gradient_penalty(netD, real, fake, real.device, lambda_gp=self.opt.lambda_gradient)
-            loss_D += self.loss_D_Gradient
-        loss_D.backward()
-        return loss_D
-
-    def backward_D(self):
-        """Calculate the GAN loss for discriminator"""
-        self.loss_D = 0
-        if 'coarse' in self.opt.coarse_or_refine:
-            self.set_requires_grad([self.netD], True)
-            self.optimizerD.zero_grad()
-            real = self.img.detach()
-            fake = self.img_g.detach()
-            self.loss_D += self.backward_D_basic(self.netD, real, fake) if self.opt.lambda_g > 0 else 0
-        if 'refine' in self.opt.coarse_or_refine:
-            self.set_requires_grad([self.netD_Ref], True)
-            self.optimizerDRef.zero_grad()
-            real = self.img_org.detach()
-            fake = self.img_ref.detach()
-            self.loss_D += self.backward_D_basic(self.netD_Ref, real, fake) if self.opt.lambda_g > 0 else 0
-
-    def backward_G(self):
-        """Calculate the loss for generator"""
-        self.loss_G_GAN = 0
-        self.loss_G_rec = 0
-        self.loss_G_lp = 0
-        if 'coarse' in self.opt.coarse_or_refine:
-            self.set_requires_grad([self.netD], False)
-            self.optimizerG.zero_grad()
-            self.loss_G_GAN += self.GANloss(self.netD(self.img_g), True) * self.opt.lambda_g if self.opt.lambda_g > 0 else 0
-            self.loss_G_rec += (self.L1loss(self.img_g * (1 - self.fixed_mask), self.img * (1 - self.fixed_mask)) * 3 +
-                                self.L1loss(self.img_g * self.fixed_mask, self.img * self.fixed_mask)) * self.opt.lambda_rec
-            norm_real = self.NormalVGG((self.img + 1) * 0.5)
-            norm_fake = self.NormalVGG((self.img_g + 1) * 0.5)
-            self.loss_G_lp += (self.LPIPSloss(norm_real, norm_fake).mean()) * self.opt.lambda_lp if self.opt.lambda_lp > 0 else 0
-        if 'refine' in self.opt.coarse_or_refine:
-            self.set_requires_grad([self.netD_Ref], False)
-            self.optimizerGRef.zero_grad()
-            self.loss_G_GAN += self.GANloss(self.netD_Ref(self.img_ref), True) * self.opt.lambda_g if self.opt.lambda_g > 0 else 0
-            self.loss_G_rec += (self.L1loss(self.img_ref * (1 - self.mask), self.img_org * (1 - self.mask)) * 3 +
-                                self.L1loss(self.img_ref * self.mask, self.img_org * self.mask)) * self.opt.lambda_rec
-            norm_real = self.NormalVGG((self.img_org + 1) * 0.5)
-            norm_fake = self.NormalVGG((self.img_ref + 1) * 0.5)
-            self.loss_G_lp += (self.LPIPSloss(norm_real, norm_fake).mean()) * self.opt.lambda_lp if self.opt.lambda_lp > 0 else 0
-
-        self.loss_G = self.loss_G_GAN + self.loss_G_rec + self.loss_G_lp
-
-        self.loss_G.backward()
-
-    def optimize_parameters(self):
-        """update network weights"""
-        # forward
-        self.set_requires_grad([self.netE, self.netG], 'coarse' in self.opt.coarse_or_refine)
-        self.forward()
-        # update D
-        self.backward_D()
-        if 'coarse' in self.opt.coarse_or_refine:
-            self.optimizerD.step()
-        if 'refine' in self.opt.coarse_or_refine:
-            self.optimizerDRef.step()
-        # update G
-        self.backward_G()
-        if 'coarse' in self.opt.coarse_or_refine:
-            self.optimizerG.step()
-        if 'refine' in self.opt.coarse_or_refine:
-            self.optimizerGRef.step()
-
-    def _refine_opt(self, opt):
-        """modify the opt for refine generator and discriminator"""
-        opt.netG = 'refine'
-        opt.netD = 'style'
-        opt.attn_D = True
-
-        return opt

spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/version.py
DELETED
@@ -1,35 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-__version__ = '1.3.17'
-
-
-def parse_version_info(version_str: str, length: int = 4) -> tuple:
-    """Parse a version string into a tuple.
-
-    Args:
-        version_str (str): The version string.
-        length (int): The maximum number of version levels. Default: 4.
-
-    Returns:
-        tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
-            (1, 3, 0, 0, 0, 0), and "2.0.0rc1" is parsed into
-            (2, 0, 0, 0, 'rc', 1) (when length is set to 4).
-    """
-    from packaging.version import parse
-    version = parse(version_str)
-    assert version.release, f'failed to parse version {version_str}'
-    release = list(version.release)
-    release = release[:length]
-    if len(release) < length:
-        release = release + [0] * (length - len(release))
-    if version.is_prerelease:
-        release.extend(list(version.pre))
-    elif version.is_postrelease:
-        release.extend(list(version.post))
-    else:
-        release.extend([0, 0])
-    return tuple(release)
-
-
-version_info = tuple(int(x) for x in __version__.split('.')[:3])
-
-__all__ = ['__version__', 'version_info', 'parse_version_info']

spaces/Anonymous-sub/Rerender/ControlNet/cldm/cldm.py
DELETED
@@ -1,435 +0,0 @@
-import einops
-import torch
-import torch as th
-import torch.nn as nn
-
-from ldm.modules.diffusionmodules.util import (
-    conv_nd,
-    linear,
-    zero_module,
-    timestep_embedding,
-)
-
-from einops import rearrange, repeat
-from torchvision.utils import make_grid
-from ldm.modules.attention import SpatialTransformer
-from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock
-from ldm.models.diffusion.ddpm import LatentDiffusion
-from ldm.util import log_txt_as_img, exists, instantiate_from_config
-from ldm.models.diffusion.ddim import DDIMSampler
-
-
-class ControlledUnetModel(UNetModel):
-    def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs):
-        hs = []
-        with torch.no_grad():
-            t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
-            emb = self.time_embed(t_emb)
-            h = x.type(self.dtype)
-            for module in self.input_blocks:
-                h = module(h, emb, context)
-                hs.append(h)
-            h = self.middle_block(h, emb, context)
-
-        if control is not None:
-            h += control.pop()
-
-        for i, module in enumerate(self.output_blocks):
-            if only_mid_control or control is None:
-                h = torch.cat([h, hs.pop()], dim=1)
-            else:
-                h = torch.cat([h, hs.pop() + control.pop()], dim=1)
-            h = module(h, emb, context)
-
-        h = h.type(x.dtype)
-        return self.out(h)
-
-
-class ControlNet(nn.Module):
-    def __init__(
-        self,
-        image_size,
-        in_channels,
-        model_channels,
-        hint_channels,
-        num_res_blocks,
-        attention_resolutions,
-        dropout=0,
-        channel_mult=(1, 2, 4, 8),
-        conv_resample=True,
-        dims=2,
-        use_checkpoint=False,
-        use_fp16=False,
-        num_heads=-1,
-        num_head_channels=-1,
-        num_heads_upsample=-1,
-        use_scale_shift_norm=False,
-        resblock_updown=False,
-        use_new_attention_order=False,
-        use_spatial_transformer=False,  # custom transformer support
-        transformer_depth=1,  # custom transformer support
-        context_dim=None,  # custom transformer support
-        n_embed=None,  # custom support for prediction of discrete ids into codebook of first stage vq model
-        legacy=True,
-        disable_self_attentions=None,
-        num_attention_blocks=None,
-        disable_middle_self_attn=False,
-        use_linear_in_transformer=False,
-    ):
-        super().__init__()
-        if use_spatial_transformer:
-            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
-
-        if context_dim is not None:
-            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
-            from omegaconf.listconfig import ListConfig
-            if type(context_dim) == ListConfig:
-                context_dim = list(context_dim)
-
-        if num_heads_upsample == -1:
-            num_heads_upsample = num_heads
-
-        if num_heads == -1:
-            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
-
-        if num_head_channels == -1:
-            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
-
-        self.dims = dims
-        self.image_size = image_size
-        self.in_channels = in_channels
-        self.model_channels = model_channels
-        if isinstance(num_res_blocks, int):
-            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
-        else:
-            if len(num_res_blocks) != len(channel_mult):
-                raise ValueError("provide num_res_blocks either as an int (globally constant) or "
-                                 "as a list/tuple (per-level) with the same length as channel_mult")
-            self.num_res_blocks = num_res_blocks
-        if disable_self_attentions is not None:
-            # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
-            assert len(disable_self_attentions) == len(channel_mult)
-        if num_attention_blocks is not None:
-            assert len(num_attention_blocks) == len(self.num_res_blocks)
-            assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
-            print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
-                  f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
-                  f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
-                  f"attention will still not be set.")
-
-        self.attention_resolutions = attention_resolutions
-        self.dropout = dropout
-        self.channel_mult = channel_mult
-        self.conv_resample = conv_resample
-        self.use_checkpoint = use_checkpoint
-        self.dtype = th.float16 if use_fp16 else th.float32
-        self.num_heads = num_heads
-        self.num_head_channels = num_head_channels
-        self.num_heads_upsample = num_heads_upsample
-        self.predict_codebook_ids = n_embed is not None
-
-        time_embed_dim = model_channels * 4
-        self.time_embed = nn.Sequential(
-            linear(model_channels, time_embed_dim),
-            nn.SiLU(),
-            linear(time_embed_dim, time_embed_dim),
-        )
-
-        self.input_blocks = nn.ModuleList(
-            [
-                TimestepEmbedSequential(
-                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
-                )
-            ]
-        )
-        self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)])
-
-        self.input_hint_block = TimestepEmbedSequential(
-            conv_nd(dims, hint_channels, 16, 3, padding=1),
-            nn.SiLU(),
-            conv_nd(dims, 16, 16, 3, padding=1),
-            nn.SiLU(),
-            conv_nd(dims, 16, 32, 3, padding=1, stride=2),
-            nn.SiLU(),
-            conv_nd(dims, 32, 32, 3, padding=1),
-            nn.SiLU(),
-            conv_nd(dims, 32, 96, 3, padding=1, stride=2),
-            nn.SiLU(),
-            conv_nd(dims, 96, 96, 3, padding=1),
-            nn.SiLU(),
-            conv_nd(dims, 96, 256, 3, padding=1, stride=2),
-            nn.SiLU(),
-            zero_module(conv_nd(dims, 256, model_channels, 3, padding=1))
-        )
-
-        self._feature_size = model_channels
-        input_block_chans = [model_channels]
-        ch = model_channels
-        ds = 1
-        for level, mult in enumerate(channel_mult):
-            for nr in range(self.num_res_blocks[level]):
-                layers = [
-                    ResBlock(
-                        ch,
-                        time_embed_dim,
-                        dropout,
-                        out_channels=mult * model_channels,
-                        dims=dims,
-                        use_checkpoint=use_checkpoint,
-                        use_scale_shift_norm=use_scale_shift_norm,
-                    )
-                ]
-                ch = mult * model_channels
-                if ds in attention_resolutions:
-                    if num_head_channels == -1:
-                        dim_head = ch // num_heads
-                    else:
-                        num_heads = ch // num_head_channels
-                        dim_head = num_head_channels
-                    if legacy:
-                        # num_heads = 1
-                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-                    if exists(disable_self_attentions):
-                        disabled_sa = disable_self_attentions[level]
-                    else:
-                        disabled_sa = False
-
-                    if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
-                        layers.append(
-                            AttentionBlock(
-                                ch,
-                                use_checkpoint=use_checkpoint,
-                                num_heads=num_heads,
-                                num_head_channels=dim_head,
-                                use_new_attention_order=use_new_attention_order,
-                            ) if not use_spatial_transformer else SpatialTransformer(
-                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
-                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
-                                use_checkpoint=use_checkpoint
-                            )
-                        )
-                self.input_blocks.append(TimestepEmbedSequential(*layers))
-                self.zero_convs.append(self.make_zero_conv(ch))
-                self._feature_size += ch
-                input_block_chans.append(ch)
-            if level != len(channel_mult) - 1:
-                out_ch = ch
-                self.input_blocks.append(
-                    TimestepEmbedSequential(
-                        ResBlock(
-                            ch,
-                            time_embed_dim,
-                            dropout,
-                            out_channels=out_ch,
-                            dims=dims,
-                            use_checkpoint=use_checkpoint,
-                            use_scale_shift_norm=use_scale_shift_norm,
-                            down=True,
-                        )
-                        if resblock_updown
-                        else Downsample(
-                            ch, conv_resample, dims=dims, out_channels=out_ch
-                        )
-                    )
-                )
-                ch = out_ch
-                input_block_chans.append(ch)
-                self.zero_convs.append(self.make_zero_conv(ch))
-                ds *= 2
-                self._feature_size += ch
-
-        if num_head_channels == -1:
-            dim_head = ch // num_heads
-        else:
-            num_heads = ch // num_head_channels
-            dim_head = num_head_channels
-        if legacy:
-            # num_heads = 1
-            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-        self.middle_block = TimestepEmbedSequential(
-            ResBlock(
-                ch,
-                time_embed_dim,
-                dropout,
-                dims=dims,
-                use_checkpoint=use_checkpoint,
-                use_scale_shift_norm=use_scale_shift_norm,
-            ),
-            AttentionBlock(
-                ch,
-                use_checkpoint=use_checkpoint,
-                num_heads=num_heads,
-                num_head_channels=dim_head,
-                use_new_attention_order=use_new_attention_order,
-            ) if not use_spatial_transformer else SpatialTransformer(  # always uses a self-attn
-                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
-                disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
-                use_checkpoint=use_checkpoint
-            ),
-            ResBlock(
-                ch,
-                time_embed_dim,
-                dropout,
-                dims=dims,
-                use_checkpoint=use_checkpoint,
-                use_scale_shift_norm=use_scale_shift_norm,
-            ),
-        )
-        self.middle_block_out = self.make_zero_conv(ch)
-        self._feature_size += ch
-
-    def make_zero_conv(self, channels):
-        return TimestepEmbedSequential(zero_module(conv_nd(self.dims, channels, channels, 1, padding=0)))
-
-    def forward(self, x, hint, timesteps, context, **kwargs):
-        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
-        emb = self.time_embed(t_emb)
-
-        guided_hint = self.input_hint_block(hint, emb, context)
-
-        outs = []
-
-        h = x.type(self.dtype)
-        for module, zero_conv in zip(self.input_blocks, self.zero_convs):
-            if guided_hint is not None:
-                h = module(h, emb, context)
-                h += guided_hint
-                guided_hint = None
-            else:
-                h = module(h, emb, context)
-            outs.append(zero_conv(h, emb, context))
-
-        h = self.middle_block(h, emb, context)
-        outs.append(self.middle_block_out(h, emb, context))
-
-        return outs
-
-
-class ControlLDM(LatentDiffusion):
-
-    def __init__(self, control_stage_config, control_key, only_mid_control, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.control_model = instantiate_from_config(control_stage_config)
-        self.control_key = control_key
-        self.only_mid_control = only_mid_control
-        self.control_scales = [1.0] * 13
-
-    @torch.no_grad()
-    def get_input(self, batch, k, bs=None, *args, **kwargs):
-        x, c = super().get_input(batch, self.first_stage_key, *args, **kwargs)
-        control = batch[self.control_key]
-        if bs is not None:
-            control = control[:bs]
-        control = control.to(self.device)
-        control = einops.rearrange(control, 'b h w c -> b c h w')
-        control = control.to(memory_format=torch.contiguous_format).float()
-        return x, dict(c_crossattn=[c], c_concat=[control])
-
-    def apply_model(self, x_noisy, t, cond, *args, **kwargs):
-        assert isinstance(cond, dict)
-        diffusion_model = self.model.diffusion_model
-
-        cond_txt = torch.cat(cond['c_crossattn'], 1)
-
-        if cond['c_concat'] is None:
-            eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=None, only_mid_control=self.only_mid_control)
-        else:
-            control = self.control_model(x=x_noisy, hint=torch.cat(cond['c_concat'], 1), timesteps=t, context=cond_txt)
-            control = [c * scale for c, scale in zip(control, self.control_scales)]
-            eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=control, only_mid_control=self.only_mid_control)
-
-        return eps
-
-    @torch.no_grad()
-    def get_unconditional_conditioning(self, N):
-        return self.get_learned_conditioning([""] * N)
-
-    @torch.no_grad()
-    def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None,
-                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
-                   plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None,
-                   use_ema_scope=True,
-                   **kwargs):
-        use_ddim = ddim_steps is not None
-
-        log = dict()
-        z, c = self.get_input(batch, self.first_stage_key, bs=N)
-        c_cat, c = c["c_concat"][0][:N], c["c_crossattn"][0][:N]
-        N = min(z.shape[0], N)
-        n_row = min(z.shape[0], n_row)
-        log["reconstruction"] = self.decode_first_stage(z)
-        log["control"] = c_cat * 2.0 - 1.0
-        log["conditioning"] = log_txt_as_img((512, 512), batch[self.cond_stage_key], size=16)
-
-        if plot_diffusion_rows:
-            # get diffusion row
-            diffusion_row = list()
-            z_start = z[:n_row]
-            for t in range(self.num_timesteps):
-                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
-                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
-                    t = t.to(self.device).long()
-                    noise = torch.randn_like(z_start)
-                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
-                    diffusion_row.append(self.decode_first_stage(z_noisy))
-
-            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
-            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
-            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
-            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
-            log["diffusion_row"] = diffusion_grid
-
-        if sample:
-            # get denoise row
-            samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
-                                                     batch_size=N, ddim=use_ddim,
-                                                     ddim_steps=ddim_steps, eta=ddim_eta)
-            x_samples = self.decode_first_stage(samples)
-            log["samples"] = x_samples
-            if plot_denoise_rows:
-                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
-                log["denoise_row"] = denoise_grid
-
-        if unconditional_guidance_scale > 1.0:
-            uc_cross = self.get_unconditional_conditioning(N)
-            uc_cat = c_cat  # torch.zeros_like(c_cat)
-            uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
-            samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
-                                             batch_size=N, ddim=use_ddim,
-                                             ddim_steps=ddim_steps, eta=ddim_eta,
-                                             unconditional_guidance_scale=unconditional_guidance_scale,
-                                             unconditional_conditioning=uc_full,
-                                             )
-            x_samples_cfg = self.decode_first_stage(samples_cfg)
-            log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
-        return log
-
-    @torch.no_grad()
-    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
-        ddim_sampler = DDIMSampler(self)
-        b, c, h, w = cond["c_concat"][0].shape
-        shape = (self.channels, h // 8, w // 8)
-        samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs)
-        return samples, intermediates
-
-    def configure_optimizers(self):
-        lr = self.learning_rate
-        params = list(self.control_model.parameters())
-        if not self.sd_locked:
-            params += list(self.model.diffusion_model.output_blocks.parameters())
-            params += list(self.model.diffusion_model.out.parameters())
-        opt = torch.optim.AdamW(params, lr=lr)
-        return opt
-
-    def low_vram_shift(self, is_diffusing):
-        if is_diffusing:
-            self.model = self.model.cuda()
-            self.control_model = self.control_model.cuda()
-            self.first_stage_model = self.first_stage_model.cpu()
-            self.cond_stage_model = self.cond_stage_model.cpu()
-        else:
-            self.model = self.model.cpu()
-            self.control_model = self.control_model.cpu()
-            self.first_stage_model = self.first_stage_model.cuda()
-            self.cond_stage_model = self.cond_stage_model.cuda()

spaces/Asahi402/anime-remove-background/app.py
DELETED
@@ -1,52 +0,0 @@
-import gradio as gr
-import huggingface_hub
-import onnxruntime as rt
-import numpy as np
-import cv2
-
-
-def get_mask(img, s=1024):
-    img = (img / 255).astype(np.float32)
-    h, w = h0, w0 = img.shape[:-1]
-    h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
-    ph, pw = s - h, s - w
-    img_input = np.zeros([s, s, 3], dtype=np.float32)
-    img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h))
-    img_input = np.transpose(img_input, (2, 0, 1))
-    img_input = img_input[np.newaxis, :]
-    mask = rmbg_model.run(None, {'img': img_input})[0][0]
-    mask = np.transpose(mask, (1, 2, 0))
-    mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
-    mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
-    return mask
-
-
-def rmbg_fn(img):
-    mask = get_mask(img)
-    img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
-    mask = (mask * 255).astype(np.uint8)
-    img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
-    mask = mask.repeat(3, axis=2)
-    return mask, img
-
-
-if __name__ == "__main__":
-    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
-    model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
-    rmbg_model = rt.InferenceSession(model_path, providers=providers)
-    app = gr.Blocks()
-    with app:
-        gr.Markdown("# Anime Remove Background\n\n"
-                    "\n\n"
-                    "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)")
-        with gr.Row():
-            with gr.Column():
-                input_img = gr.Image(label="input image")
-                examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
-                examples = gr.Dataset(components=[input_img], samples=examples_data)
-                run_btn = gr.Button(variant="primary")
-            output_mask = gr.Image(label="mask")
-            output_img = gr.Image(label="result", image_mode="RGBA")
-        examples.click(lambda x: x[0], [examples], [input_img])
-        run_btn.click(rmbg_fn, [input_img], [output_mask, output_img])
-    app.launch()

spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/setuptools_build.py
DELETED
@@ -1,146 +0,0 @@
-import sys
-import textwrap
-from typing import List, Optional, Sequence
-
-# Shim to wrap setup.py invocation with setuptools
-# Note that __file__ is handled via two {!r} *and* %r, to ensure that paths on
-# Windows are correctly handled (it should be "C:\\Users" not "C:\Users").
-_SETUPTOOLS_SHIM = textwrap.dedent(
-    """
-    exec(compile('''
-    # This is <pip-setuptools-caller> -- a caller that pip uses to run setup.py
-    #
-    # - It imports setuptools before invoking setup.py, to enable projects that directly
-    #   import from `distutils.core` to work with newer packaging standards.
-    # - It provides a clear error message when setuptools is not installed.
-    # - It sets `sys.argv[0]` to the underlying `setup.py`, when invoking `setup.py` so
-    #   setuptools doesn't think the script is `-c`. This avoids the following warning:
-    #     manifest_maker: standard file '-c' not found".
-    # - It generates a shim setup.py, for handling setup.cfg-only projects.
-    import os, sys, tokenize
-
-    try:
-        import setuptools
-    except ImportError as error:
-        print(
-            "ERROR: Can not execute `setup.py` since setuptools is not available in "
-            "the build environment.",
-            file=sys.stderr,
-        )
-        sys.exit(1)
-
-    __file__ = %r
-    sys.argv[0] = __file__
-
-    if os.path.exists(__file__):
-        filename = __file__
-        with tokenize.open(__file__) as f:
-            setup_py_code = f.read()
-    else:
-        filename = "<auto-generated setuptools caller>"
-        setup_py_code = "from setuptools import setup; setup()"
-
-    exec(compile(setup_py_code, filename, "exec"))
-    ''' % ({!r},), "<pip-setuptools-caller>", "exec"))
-    """
-).rstrip()
-
-
-def make_setuptools_shim_args(
-    setup_py_path: str,
-    global_options: Optional[Sequence[str]] = None,
-    no_user_config: bool = False,
-    unbuffered_output: bool = False,
-) -> List[str]:
-    """
-    Get setuptools command arguments with shim wrapped setup file invocation.
-
-    :param setup_py_path: The path to setup.py to be wrapped.
-    :param global_options: Additional global options.
-    :param no_user_config: If True, disables personal user configuration.
-    :param unbuffered_output: If True, adds the unbuffered switch to the
-       argument list.
-    """
-    args = [sys.executable]
-    if unbuffered_output:
-        args += ["-u"]
-    args += ["-c", _SETUPTOOLS_SHIM.format(setup_py_path)]
-    if global_options:
-        args += global_options
-    if no_user_config:
-        args += ["--no-user-cfg"]
-    return args
-
-
-def make_setuptools_bdist_wheel_args(
-    setup_py_path: str,
-    global_options: Sequence[str],
-    build_options: Sequence[str],
-    destination_dir: str,
-) -> List[str]:
-    # NOTE: Eventually, we'd want to also -S to the flags here, when we're
-    # isolating. Currently, it breaks Python in virtualenvs, because it
-    # relies on site.py to find parts of the standard library outside the
-    # virtualenv.
-    args = make_setuptools_shim_args(
-        setup_py_path, global_options=global_options, unbuffered_output=True
-    )
-    args += ["bdist_wheel", "-d", destination_dir]
-    args += build_options
-    return args
-
-
-def make_setuptools_clean_args(
-    setup_py_path: str,
-    global_options: Sequence[str],
-) -> List[str]:
-    args = make_setuptools_shim_args(
-        setup_py_path, global_options=global_options, unbuffered_output=True
-    )
-    args += ["clean", "--all"]
-    return args
-
-
-def make_setuptools_develop_args(
-    setup_py_path: str,
-    *,
-    global_options: Sequence[str],
-    no_user_config: bool,
-    prefix: Optional[str],
-    home: Optional[str],
-    use_user_site: bool,
-) -> List[str]:
-    assert not (use_user_site and prefix)
-
-    args = make_setuptools_shim_args(
-        setup_py_path,
-        global_options=global_options,
-        no_user_config=no_user_config,
-    )
-
-    args += ["develop", "--no-deps"]
-
-    if prefix:
-        args += ["--prefix", prefix]
-    if home is not None:
-        args += ["--install-dir", home]
-
-    if use_user_site:
-        args += ["--user", "--prefix="]
-
-    return args
-
-
-def make_setuptools_egg_info_args(
-    setup_py_path: str,
-    egg_info_dir: Optional[str],
-    no_user_config: bool,
-) -> List[str]:
-    args = make_setuptools_shim_args(setup_py_path, no_user_config=no_user_config)
-
-    args += ["egg_info"]
-
-    if egg_info_dir:
-        args += ["--egg-base", egg_info_dir]
-
-    return args
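For orientation: every helper in the deleted module only assembles an argv of the form `python -u -c <shim> <command> ...`. A minimal sketch of what a caller gets back (the setup.py path below is a placeholder, not from the repo):

```python
# Hypothetical inspection of the argv pip would run for `setup.py clean`.
from pip._internal.utils.setuptools_build import make_setuptools_clean_args

args = make_setuptools_clean_args("/tmp/pkg/setup.py", global_options=[])
# -> [sys.executable, "-u", "-c", "<setuptools shim>", "clean", "--all"]
print(args[-2:])  # ['clean', '--all']
```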
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/vcs/__init__.py
DELETED
@@ -1,15 +0,0 @@
-# Expose a limited set of classes and functions so callers outside of
-# the vcs package don't need to import deeper than `pip._internal.vcs`.
-# (The test directory may still need to import from a vcs sub-package.)
-# Import all vcs modules to register each VCS in the VcsSupport object.
-import pip._internal.vcs.bazaar
-import pip._internal.vcs.git
-import pip._internal.vcs.mercurial
-import pip._internal.vcs.subversion  # noqa: F401
-from pip._internal.vcs.versioncontrol import (  # noqa: F401
-    RemoteNotFoundError,
-    RemoteNotValidError,
-    is_url,
-    make_vcs_requirement_url,
-    vcs,
-)
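The side-effect imports above exist only to populate the `vcs` registry. A minimal sketch of querying it afterwards (this assumes `VcsSupport.backends`, which pip exposes as a property):

```python
# Hypothetical registry query; relies on the imports above having run.
from pip._internal.vcs import vcs

print([backend.name for backend in vcs.backends])  # e.g. ['bzr', 'git', 'hg', 'svn']
```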
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/workflows/levenshtein.js
DELETED
@@ -1,44 +0,0 @@
-/*
-Copyright (c) 2011 Andrei Mackenzie
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-// Compute the edit distance between the two given strings
-exports.getEditDistance = function(a, b){
-  if(a.length == 0) return b.length;
-  if(b.length == 0) return a.length;
-
-  var matrix = [];
-
-  // increment along the first column of each row
-  var i;
-  for(i = 0; i <= b.length; i++){
-    matrix[i] = [i];
-  }
-
-  // increment each column in the first row
-  var j;
-  for(j = 0; j <= a.length; j++){
-    matrix[0][j] = j;
-  }
-
-  // Fill in the rest of the matrix
-  for(i = 1; i <= b.length; i++){
-    for(j = 1; j <= a.length; j++){
-      if(b.charAt(i-1) == a.charAt(j-1)){
-        matrix[i][j] = matrix[i-1][j-1];
-      } else {
-        matrix[i][j] = Math.min(matrix[i-1][j-1] + 1, // substitution
-                                Math.min(matrix[i][j-1] + 1, // insertion
-                                         matrix[i-1][j] + 1)); // deletion
-      }
-    }
-  }
-
-  return matrix[b.length][a.length];
-};
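The file above is the textbook Levenshtein dynamic program. The same recurrence in Python, as a sketch (function and variable names are mine, not from the repo):

```python
def edit_distance(a: str, b: str) -> int:
    """Levenshtein distance via the same DP recurrence as levenshtein.js."""
    if not a:
        return len(b)
    if not b:
        return len(a)
    # matrix[i][j] = distance between b[:i] and a[:j]
    matrix = [[0] * (len(a) + 1) for _ in range(len(b) + 1)]
    for i in range(len(b) + 1):
        matrix[i][0] = i
    for j in range(len(a) + 1):
        matrix[0][j] = j
    for i in range(1, len(b) + 1):
        for j in range(1, len(a) + 1):
            if b[i - 1] == a[j - 1]:
                matrix[i][j] = matrix[i - 1][j - 1]
            else:
                matrix[i][j] = 1 + min(
                    matrix[i - 1][j - 1],  # substitution
                    matrix[i][j - 1],      # insertion
                    matrix[i - 1][j],      # deletion
                )
    return matrix[len(b)][len(a)]

assert edit_distance("kitten", "sitting") == 3
```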
spaces/BaiyuS/Real-CUGAN-YZ/app.py
DELETED
@@ -1,62 +0,0 @@
-from upcunet_v3 import RealWaifuUpScaler
-import gradio as gr
-import time
-import logging
-import os
-from PIL import ImageOps
-import numpy as np
-import math
-
-
-def greet(input_img, input_model_name, input_tile_mode):
-    # if input_img.size[0] * input_img.size[1] > 256 * 256:
-    #     y = int(math.sqrt(256*256/input_img.size[0]*input_img.size[1]))
-    #     x = int(input_img.size[0]/input_img.size[1]*y)
-    #     input_img = ImageOps.fit(input_img, (x, y))
-    input_img = np.array(input_img)
-    if input_model_name not in model_cache:
-        t1 = time.time()
-        upscaler = RealWaifuUpScaler(input_model_name[2], ModelPath + input_model_name, half=False, device="cpu")
-        t2 = time.time()
-        logger.info(f'load model time, {t2 - t1}')
-        model_cache[input_model_name] = upscaler
-    else:
-        upscaler = model_cache[input_model_name]
-        logger.info(f'load model from cache')
-
-    start = time.time()
-    result = upscaler(input_img, tile_mode=input_tile_mode)
-    end = time.time()
-    logger.info(f'input_model_name, {input_model_name}')
-    logger.info(f'input_tile_mode, {input_tile_mode}')
-    logger.info(f'input shape, {input_img.shape}')
-    logger.info(f'output shape, {result.shape}')
-    logger.info(f'speed time, {end - start}')
-    return result
-
-
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s")
-    logger = logging.getLogger()
-
-    ModelPath = "weights_v3/"
-    model_cache = {}
-
-    input_model_name = gr.inputs.Dropdown(os.listdir(ModelPath), default="up2x-latest-denoise2x.pth", label='选择model')
-    input_tile_mode = gr.inputs.Dropdown([0, 1, 2, 3, 4], default=2, label='选择tile_mode')
-    input_img = gr.inputs.Image(label='image', type='pil')
-
-    inputs = [input_img, input_model_name, input_tile_mode]
-    outputs = "image"
-    iface = gr.Interface(fn=greet,
-                         inputs=inputs,
-                         outputs=outputs,
-                         allow_screenshot=False,
-                         allow_flagging='never',
-                         examples=[['test-img.jpg', "up2x-latest-denoise2x.pth", 2]],
-                         article='[https://github.com/bilibili/ailab/tree/main/Real-CUGAN](https://github.com/bilibili/ailab/tree/main/Real-CUGAN)<br>'
-                                 '感谢b站开源的项目,图片过大会导致内存不足,所有我将图片裁剪小,想体验大图片的效果请自行前往上面的链接。<br>'
-                                 '修改bbb'
-                                 'The large image will lead to memory limit exceeded. So I crop and resize image. '
-                                 'If you want to experience the large image, please go to the link above.')
-    iface.launch()
spaces/Banbri/zcvzcv/src/app/ocr.tsx
DELETED
@@ -1,3 +0,0 @@
-"use client"
-
-import { createWorker } from "tesseract.js"
spaces/BasToTheMax/voicechange/app.py
DELETED
@@ -1,18 +0,0 @@
-import gradio as gr
-from TTS.api import TTS
-import tempfile
-
-api = TTS(model_name="voice_conversion_models/multilingual/vctk/freevc24")
-
-def greet(source, target):
-    path = tempfile.NamedTemporaryFile(prefix="bttm_", suffix=".wav").name
-
-    print("adio", source, target, path)
-    api.voice_conversion_to_file(source_wav=source, target_wav=target, file_path=path)
-    print("> Done")
-
-    return path
-
-app = gr.Interface(fn=greet, inputs=[gr.Audio(type="filepath"), gr.Audio(type="filepath")], outputs=gr.Audio(type="filepath"))
-app.queue(max_size=5000, concurrency_count=5)
-app.launch()
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/engine/train_loop.py
DELETED
@@ -1,273 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-
-import logging
-import numpy as np
-import time
-import weakref
-import torch
-
-import detectron2.utils.comm as comm
-from detectron2.utils.events import EventStorage
-
-__all__ = ["HookBase", "TrainerBase", "SimpleTrainer"]
-
-
-class HookBase:
-    """
-    Base class for hooks that can be registered with :class:`TrainerBase`.
-
-    Each hook can implement 4 methods. The way they are called is demonstrated
-    in the following snippet:
-
-    .. code-block:: python
-
-        hook.before_train()
-        for iter in range(start_iter, max_iter):
-            hook.before_step()
-            trainer.run_step()
-            hook.after_step()
-        hook.after_train()
-
-    Notes:
-        1. In the hook method, users can access `self.trainer` to access more
-           properties about the context (e.g., current iteration).
-
-        2. A hook that does something in :meth:`before_step` can often be
-           implemented equivalently in :meth:`after_step`.
-           If the hook takes non-trivial time, it is strongly recommended to
-           implement the hook in :meth:`after_step` instead of :meth:`before_step`.
-           The convention is that :meth:`before_step` should only take negligible time.
-
-           Following this convention will allow hooks that do care about the difference
-           between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
-           function properly.
-
-    Attributes:
-        trainer: A weak reference to the trainer object. Set by the trainer when the hook is
-            registered.
-    """
-
-    def before_train(self):
-        """
-        Called before the first iteration.
-        """
-        pass
-
-    def after_train(self):
-        """
-        Called after the last iteration.
-        """
-        pass
-
-    def before_step(self):
-        """
-        Called before each iteration.
-        """
-        pass
-
-    def after_step(self):
-        """
-        Called after each iteration.
-        """
-        pass
-
-
-class TrainerBase:
-    """
-    Base class for iterative trainer with hooks.
-
-    The only assumption we made here is: the training runs in a loop.
-    A subclass can implement what the loop is.
-    We made no assumptions about the existence of dataloader, optimizer, model, etc.
-
-    Attributes:
-        iter(int): the current iteration.
-
-        start_iter(int): The iteration to start with.
-            By convention the minimum possible value is 0.
-
-        max_iter(int): The iteration to end training.
-
-        storage(EventStorage): An EventStorage that's opened during the course of training.
-    """
-
-    def __init__(self):
-        self._hooks = []
-
-    def register_hooks(self, hooks):
-        """
-        Register hooks to the trainer. The hooks are executed in the order
-        they are registered.
-
-        Args:
-            hooks (list[Optional[HookBase]]): list of hooks
-        """
-        hooks = [h for h in hooks if h is not None]
-        for h in hooks:
-            assert isinstance(h, HookBase)
-            # To avoid circular reference, hooks and trainer cannot own each other.
-            # This normally does not matter, but will cause memory leak if the
-            # involved objects contain __del__:
-            # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
-            h.trainer = weakref.proxy(self)
-        self._hooks.extend(hooks)
-
-    def train(self, start_iter: int, max_iter: int):
-        """
-        Args:
-            start_iter, max_iter (int): See docs above
-        """
-        logger = logging.getLogger(__name__)
-        logger.info("Starting training from iteration {}".format(start_iter))
-
-        self.iter = self.start_iter = start_iter
-        self.max_iter = max_iter
-
-        with EventStorage(start_iter) as self.storage:
-            try:
-                self.before_train()
-                for self.iter in range(start_iter, max_iter):
-                    self.before_step()
-                    self.run_step()
-                    self.after_step()
-            except Exception:
-                logger.exception("Exception during training:")
-                raise
-            finally:
-                self.after_train()
-
-    def before_train(self):
-        for h in self._hooks:
-            h.before_train()
-
-    def after_train(self):
-        for h in self._hooks:
-            h.after_train()
-
-    def before_step(self):
-        for h in self._hooks:
-            h.before_step()
-
-    def after_step(self):
-        for h in self._hooks:
-            h.after_step()
-        # this guarantees, that in each hook's after_step, storage.iter == trainer.iter
-        self.storage.step()
-
-    def run_step(self):
-        raise NotImplementedError
-
-
-class SimpleTrainer(TrainerBase):
-    """
-    A simple trainer for the most common type of task:
-    single-cost single-optimizer single-data-source iterative optimization.
-    It assumes that every step, you:
-
-    1. Compute the loss with a data from the data_loader.
-    2. Compute the gradients with the above loss.
-    3. Update the model with the optimizer.
-
-    If you want to do anything fancier than this,
-    either subclass TrainerBase and implement your own `run_step`,
-    or write your own training loop.
-    """
-
-    def __init__(self, model, data_loader, optimizer):
-        """
-        Args:
-            model: a torch Module. Takes a data from data_loader and returns a
-                dict of losses.
-            data_loader: an iterable. Contains data to be used to call model.
-            optimizer: a torch optimizer.
-        """
-        super().__init__()
-
-        """
-        We set the model to training mode in the trainer.
-        However it's valid to train a model that's in eval mode.
-        If you want your model (or a submodule of it) to behave
-        like evaluation during training, you can overwrite its train() method.
-        """
-        model.train()
-
-        self.model = model
-        self.data_loader = data_loader
-        self._data_loader_iter = iter(data_loader)
-        self.optimizer = optimizer
-
-    def run_step(self):
-        """
-        Implement the standard training logic described above.
-        """
-        assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
-        start = time.perf_counter()
-        """
-        If you want to do something with the data, you can wrap the dataloader.
-        """
-        data = next(self._data_loader_iter)
-        data_time = time.perf_counter() - start
-
-        """
-        If you want to do something with the losses, you can wrap the model.
-        """
-        loss_dict = self.model(data)
-        losses = sum(loss_dict.values())
-        self._detect_anomaly(losses, loss_dict)
-
-        metrics_dict = loss_dict
-        metrics_dict["data_time"] = data_time
-        self._write_metrics(metrics_dict)
-
-        """
-        If you need to accumulate gradients or something similar, you can
-        wrap the optimizer with your custom `zero_grad()` method.
-        """
-        self.optimizer.zero_grad()
-        losses.backward()
-
-        """
-        If you need gradient clipping/scaling or other processing, you can
-        wrap the optimizer with your custom `step()` method.
-        """
-        self.optimizer.step()
-
-    def _detect_anomaly(self, losses, loss_dict):
-        if not torch.isfinite(losses).all():
-            raise FloatingPointError(
-                "Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format(
-                    self.iter, loss_dict
-                )
-            )
-
-    def _write_metrics(self, metrics_dict: dict):
-        """
-        Args:
-            metrics_dict (dict): dict of scalar metrics
-        """
-        metrics_dict = {
-            k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
-            for k, v in metrics_dict.items()
-        }
-        # gather metrics among all workers for logging
-        # This assumes we do DDP-style training, which is currently the only
-        # supported method in detectron2.
-        all_metrics_dict = comm.gather(metrics_dict)
-
-        if comm.is_main_process():
-            if "data_time" in all_metrics_dict[0]:
-                # data_time among workers can have high variance. The actual latency
-                # caused by data_time is the maximum among workers.
-                data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
-                self.storage.put_scalar("data_time", data_time)
-
-            # average the rest metrics
-            metrics_dict = {
-                k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
-            }
-            total_losses_reduced = sum(loss for loss in metrics_dict.values())
-
-            self.storage.put_scalar("total_loss", total_losses_reduced)
-            if len(metrics_dict) > 1:
-                self.storage.put_scalars(**metrics_dict)
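A minimal sketch of the hook contract defined above; the timer hook is illustrative, not from the repo, and imports the same classes from upstream detectron2 since this vendored copy is being deleted:

```python
# Illustrative hook built on the before_step/after_step contract.
import time
from detectron2.engine.train_loop import HookBase

class IterTimerHook(HookBase):
    def before_step(self):
        self._start = time.perf_counter()

    def after_step(self):
        # storage.iter == trainer.iter here, per TrainerBase.after_step()
        self.trainer.storage.put_scalar("iter_time", time.perf_counter() - self._start)

# trainer = SimpleTrainer(model, data_loader, optimizer)
# trainer.register_hooks([IterTimerHook()])
# trainer.train(start_iter=0, max_iter=1000)
```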
spaces/CVPR/LIVE/thrust/thrust/detail/type_deduction.h
DELETED
@@ -1,74 +0,0 @@
-// Copyright (c) 2018 NVIDIA Corporation
-// (Bryce Adelstein Lelbach <[email protected]>)
-// Copyright (c) 2013-2018 Eric Niebler (`THRUST_RETURNS`, etc)
-// Copyright (c) 2016-2018 Casey Carter (`THRUST_RETURNS`, etc)
-//
-// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/cpp11_required.h>
-
-#if THRUST_CPP_DIALECT >= 2011
-
-#include <thrust/detail/preprocessor.h>
-
-#include <utility>
-#include <type_traits>
-
-///////////////////////////////////////////////////////////////////////////////
-
-/// \def THRUST_FWD(x)
-/// \brief Performs universal forwarding of a universal reference.
-///
-#define THRUST_FWD(x) ::std::forward<decltype(x)>(x)
-
-/// \def THRUST_MVCAP(x)
-/// \brief Capture `x` into a lambda by moving.
-///
-#define THRUST_MVCAP(x) x = ::std::move(x)
-
-/// \def THRUST_RETOF(invocable, ...)
-/// \brief Expands to the type returned by invoking an instance of the invocable
-///        type \a invocable with parameters of type \c __VA_ARGS__. Must
-///        be called with 1 or fewer parameters to the invocable.
-///
-#define THRUST_RETOF(...) THRUST_PP_DISPATCH(THRUST_RETOF, __VA_ARGS__)
-#define THRUST_RETOF1(C) decltype(::std::declval<C>()())
-#define THRUST_RETOF2(C, V) decltype(::std::declval<C>()(::std::declval<V>()))
-
-/// \def THRUST_RETURNS(...)
-/// \brief Expands to a function definition that returns the expression
-///        \c __VA_ARGS__.
-///
-#define THRUST_RETURNS(...) \
-  noexcept(noexcept(__VA_ARGS__)) \
-  { return (__VA_ARGS__); } \
-  /**/
-
-/// \def THRUST_DECLTYPE_RETURNS(...)
-/// \brief Expands to a function definition, including a trailing returning
-///        type, that returns the expression \c __VA_ARGS__.
-///
-#define THRUST_DECLTYPE_RETURNS(...) \
-  noexcept(noexcept(__VA_ARGS__)) \
-  -> decltype(__VA_ARGS__) \
-  { return (__VA_ARGS__); } \
-  /**/
-
-/// \def THRUST_DECLTYPE_RETURNS_WITH_SFINAE_CONDITION(condition, ...)
-/// \brief Expands to a function definition, including a trailing returning
-///        type, that returns the expression \c __VA_ARGS__. It shall only
-///        participate in overload resolution if \c condition is \c true.
-///
-#define THRUST_DECLTYPE_RETURNS_WITH_SFINAE_CONDITION(condition, ...) \
-  noexcept(noexcept(__VA_ARGS__)) \
-  -> typename std::enable_if<condition, decltype(__VA_ARGS__)>::type \
-  { return (__VA_ARGS__); } \
-  /**/
-
-///////////////////////////////////////////////////////////////////////////////
-
-#endif // THRUST_CPP_DIALECT >= 2011
-
spaces/CVPR/LIVE/thrust/thrust/version.h
DELETED
@@ -1,83 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/*! \file version.h
- *  \brief Compile-time macros encoding Thrust release version
- *
- *  <thrust/version.h> is the only Thrust header that is guaranteed to
- *  change with every thrust release.
- *
- *  It is also the only header that does not cause THRUST_HOST_SYSTEM
- *  and THRUST_DEVICE_SYSTEM to be defined. This way, a user may include
- *  this header and inspect THRUST_VERSION before programatically defining
- *  either of these macros herself.
- */
-
-#pragma once
-
-// This is the only Thrust header that is guaranteed to
-// change with every Thrust release.
-//
-// THRUST_VERSION % 100 is the sub-minor version
-// THRUST_VERSION / 100 % 1000 is the minor version
-// THRUST_VERSION / 100000 is the major version
-//
-// Because this header does not #include <thrust/detail/config.h>,
-// it is the only Thrust header that does not cause
-// THRUST_HOST_SYSTEM and THRUST_DEVICE_SYSTEM to be defined.
-
-/*! \def THRUST_VERSION
- *  \brief The preprocessor macro \p THRUST_VERSION encodes the version
- *         number of the Thrust library.
- *
- *         <tt>THRUST_VERSION % 100</tt> is the sub-minor version.
- *         <tt>THRUST_VERSION / 100 % 1000</tt> is the minor version.
- *         <tt>THRUST_VERSION / 100000</tt> is the major version.
- */
-#define THRUST_VERSION 101000
-
-/*! \def THRUST_MAJOR_VERSION
- *  \brief The preprocessor macro \p THRUST_MAJOR_VERSION encodes the
- *         major version number of the Thrust library.
- */
-#define THRUST_MAJOR_VERSION (THRUST_VERSION / 100000)
-
-/*! \def THRUST_MINOR_VERSION
- *  \brief The preprocessor macro \p THRUST_MINOR_VERSION encodes the
- *         minor version number of the Thrust library.
- */
-#define THRUST_MINOR_VERSION (THRUST_VERSION / 100 % 1000)
-
-/*! \def THRUST_SUBMINOR_VERSION
- *  \brief The preprocessor macro \p THRUST_SUBMINOR_VERSION encodes the
- *         sub-minor version number of the Thrust library.
- */
-#define THRUST_SUBMINOR_VERSION (THRUST_VERSION % 100)
-
-/*! \def THRUST_PATCH_NUMBER
- *  \brief The preprocessor macro \p THRUST_PATCH_NUMBER encodes the
- *         patch number of the Thrust library.
- */
-#define THRUST_PATCH_NUMBER 0
-
-/*! \namespace thrust
- *  \brief \p thrust is the top-level namespace which contains all Thrust
- *         functions and types.
- */
-namespace thrust
-{
-
-}
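A quick check of the version arithmetic encoded above: 101000 decodes to Thrust 1.10.0.

```python
# Same integer arithmetic as the THRUST_*_VERSION macros.
THRUST_VERSION = 101000
major = THRUST_VERSION // 100000        # 1
minor = THRUST_VERSION // 100 % 1000    # 10
subminor = THRUST_VERSION % 100         # 0
assert (major, minor, subminor) == (1, 10, 0)
```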
spaces/CVPR/WALT/configs/_base_/datasets/people_real_coco.py
DELETED
@@ -1,49 +0,0 @@
-dataset_type = 'WaltDataset'
-data_root = 'data/cwalt_train/'
-data_root_test = 'data/cwalt_test/'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
-    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
-    dict(type='RandomFlip', flip_ratio=0.5),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='Pad', size_divisor=32),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(1333, 800),
-        flip=False,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='Pad', size_divisor=32),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    samples_per_gpu=8,
-    workers_per_gpu=8,
-    train=dict(
-        type=dataset_type,
-        ann_file=data_root + '/',
-        img_prefix=data_root + '/',
-        pipeline=train_pipeline),
-    val=dict(
-        type=dataset_type,
-        ann_file=data_root_test + '/',
-        img_prefix=data_root_test + '/',
-        pipeline=test_pipeline),
-    test=dict(
-        type=dataset_type,
-        ann_file=data_root_test + '/',
-        img_prefix=data_root_test + '/',
-        pipeline=test_pipeline))
-evaluation = dict(metric=['bbox', 'segm'])
spaces/ChrisCaviar/ControlNet-v1-1/preprocessor.py
DELETED
@@ -1,77 +0,0 @@
-import gc
-
-import numpy as np
-import PIL.Image
-import torch
-from controlnet_aux import (CannyDetector, ContentShuffleDetector, HEDdetector,
-                            LineartAnimeDetector, LineartDetector,
-                            MidasDetector, MLSDdetector, NormalBaeDetector,
-                            OpenposeDetector, PidiNetDetector)
-from controlnet_aux.util import HWC3
-
-from cv_utils import resize_image
-from depth_estimator import DepthEstimator
-from image_segmentor import ImageSegmentor
-
-
-class Preprocessor:
-    MODEL_ID = 'lllyasviel/Annotators'
-
-    def __init__(self):
-        self.model = None
-        self.name = ''
-
-    def load(self, name: str) -> None:
-        if name == self.name:
-            return
-        if name == 'HED':
-            self.model = HEDdetector.from_pretrained(self.MODEL_ID)
-        elif name == 'Midas':
-            self.model = MidasDetector.from_pretrained(self.MODEL_ID)
-        elif name == 'MLSD':
-            self.model = MLSDdetector.from_pretrained(self.MODEL_ID)
-        elif name == 'Openpose':
-            self.model = OpenposeDetector.from_pretrained(self.MODEL_ID)
-        elif name == 'PidiNet':
-            self.model = PidiNetDetector.from_pretrained(self.MODEL_ID)
-        elif name == 'NormalBae':
-            self.model = NormalBaeDetector.from_pretrained(self.MODEL_ID)
-        elif name == 'Lineart':
-            self.model = LineartDetector.from_pretrained(self.MODEL_ID)
-        elif name == 'LineartAnime':
-            self.model = LineartAnimeDetector.from_pretrained(self.MODEL_ID)
-        elif name == 'Canny':
-            self.model = CannyDetector()
-        elif name == 'ContentShuffle':
-            self.model = ContentShuffleDetector()
-        elif name == 'DPT':
-            self.model = DepthEstimator()
-        elif name == 'UPerNet':
-            self.model = ImageSegmentor()
-        else:
-            raise ValueError
-        torch.cuda.empty_cache()
-        gc.collect()
-        self.name = name
-
-    def __call__(self, image: PIL.Image.Image, **kwargs) -> PIL.Image.Image:
-        if self.name == 'Canny':
-            if 'detect_resolution' in kwargs:
-                detect_resolution = kwargs.pop('detect_resolution')
-                image = np.array(image)
-                image = HWC3(image)
-                image = resize_image(image, resolution=detect_resolution)
-            image = self.model(image, **kwargs)
-            return PIL.Image.fromarray(image)
-        elif self.name == 'Midas':
-            detect_resolution = kwargs.pop('detect_resolution', 512)
-            image_resolution = kwargs.pop('image_resolution', 512)
-            image = np.array(image)
-            image = HWC3(image)
-            image = resize_image(image, resolution=detect_resolution)
-            image = self.model(image, **kwargs)
-            image = HWC3(image)
-            image = resize_image(image, resolution=image_resolution)
-            return PIL.Image.fromarray(image)
-        else:
-            return self.model(image, **kwargs)
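A minimal usage sketch of the class above; the input path and the Canny threshold values are placeholders, not from the repo:

```python
# Hypothetical driver: `load` swaps the cached annotator, `__call__` runs it.
import PIL.Image
from preprocessor import Preprocessor

preprocessor = Preprocessor()
preprocessor.load('Canny')  # instantiates CannyDetector once, frees the old model
image = PIL.Image.open('input.png')
edges = preprocessor(image, low_threshold=100, high_threshold=200)
edges.save('edges.png')
```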
spaces/ChrisPreston/diff-svc_minato_aqua/infer_tools/slicer.py
DELETED
@@ -1,142 +0,0 @@
-import librosa
-import torch
-import torchaudio
-
-
-class Slicer:
-    def __init__(self,
-                 sr: int,
-                 threshold: float = -40.,
-                 min_length: int = 5000,
-                 min_interval: int = 300,
-                 hop_size: int = 20,
-                 max_sil_kept: int = 5000):
-        if not min_length >= min_interval >= hop_size:
-            raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
-        if not max_sil_kept >= hop_size:
-            raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
-        min_interval = sr * min_interval / 1000
-        self.threshold = 10 ** (threshold / 20.)
-        self.hop_size = round(sr * hop_size / 1000)
-        self.win_size = min(round(min_interval), 4 * self.hop_size)
-        self.min_length = round(sr * min_length / 1000 / self.hop_size)
-        self.min_interval = round(min_interval / self.hop_size)
-        self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
-
-    def _apply_slice(self, waveform, begin, end):
-        if len(waveform.shape) > 1:
-            return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
-        else:
-            return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
-
-    # @timeit
-    def slice(self, waveform):
-        if len(waveform.shape) > 1:
-            samples = librosa.to_mono(waveform)
-        else:
-            samples = waveform
-        if samples.shape[0] <= self.min_length:
-            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
-        rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
-        sil_tags = []
-        silence_start = None
-        clip_start = 0
-        for i, rms in enumerate(rms_list):
-            # Keep looping while frame is silent.
-            if rms < self.threshold:
-                # Record start of silent frames.
-                if silence_start is None:
-                    silence_start = i
-                continue
-            # Keep looping while frame is not silent and silence start has not been recorded.
-            if silence_start is None:
-                continue
-            # Clear recorded silence start if interval is not enough or clip is too short
-            is_leading_silence = silence_start == 0 and i > self.max_sil_kept
-            need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
-            if not is_leading_silence and not need_slice_middle:
-                silence_start = None
-                continue
-            # Need slicing. Record the range of silent frames to be removed.
-            if i - silence_start <= self.max_sil_kept:
-                pos = rms_list[silence_start: i + 1].argmin() + silence_start
-                if silence_start == 0:
-                    sil_tags.append((0, pos))
-                else:
-                    sil_tags.append((pos, pos))
-                clip_start = pos
-            elif i - silence_start <= self.max_sil_kept * 2:
-                pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
-                pos += i - self.max_sil_kept
-                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
-                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
-                if silence_start == 0:
-                    sil_tags.append((0, pos_r))
-                    clip_start = pos_r
-                else:
-                    sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
-                    clip_start = max(pos_r, pos)
-            else:
-                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
-                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
-                if silence_start == 0:
-                    sil_tags.append((0, pos_r))
-                else:
-                    sil_tags.append((pos_l, pos_r))
-                clip_start = pos_r
-            silence_start = None
-        # Deal with trailing silence.
-        total_frames = rms_list.shape[0]
-        if silence_start is not None and total_frames - silence_start >= self.min_interval:
-            silence_end = min(total_frames, silence_start + self.max_sil_kept)
-            pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
-            sil_tags.append((pos, total_frames + 1))
-        # Apply and return slices.
-        if len(sil_tags) == 0:
-            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
-        else:
-            chunks = []
-            # The first silent segment does not start at the beginning; prepend the leading voiced chunk
-            if sil_tags[0][0]:
-                chunks.append(
-                    {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
-            for i in range(0, len(sil_tags)):
-                # Mark voiced chunks (skipping the first one)
-                if i:
-                    chunks.append({"slice": False,
-                                   "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
-                # Mark every silent chunk
-                chunks.append({"slice": True,
-                               "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
-            # The last silent segment does not reach the end; append the trailing chunk
-            if sil_tags[-1][1] * self.hop_size < len(waveform):
-                chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
-            chunk_dict = {}
-            for i in range(len(chunks)):
-                chunk_dict[str(i)] = chunks[i]
-            return chunk_dict
-
-
-def cut(audio_path, db_thresh=-30, min_len=5000):
-    audio, sr = librosa.load(audio_path, sr=None)
-    slicer = Slicer(
-        sr=sr,
-        threshold=db_thresh,
-        min_length=min_len
-    )
-    chunks = slicer.slice(audio)
-    return chunks
-
-
-def chunks2audio(audio_path, chunks):
-    chunks = dict(chunks)
-    audio, sr = torchaudio.load(audio_path)
-    if len(audio.shape) == 2 and audio.shape[1] >= 2:
-        audio = torch.mean(audio, dim=0).unsqueeze(0)
-    audio = audio.cpu().numpy()[0]
-    result = []
-    for k, v in chunks.items():
-        tag = v["split_time"].split(",")
-        if tag[0] != tag[1]:
-            result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
-    return result, sr
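A minimal sketch of chaining the two helpers at the bottom of the file; the .wav path is a placeholder:

```python
# Hypothetical driver for the slicer above; 'vocal.wav' is a placeholder path.
from infer_tools.slicer import cut, chunks2audio

chunks = cut('vocal.wav', db_thresh=-30, min_len=5000)
segments, sr = chunks2audio('vocal.wav', chunks)
for is_silence, samples in segments:
    # Silent chunks are flagged True and can be skipped during inference.
    print(is_silence, samples.shape)
```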
spaces/CikeyQI/meme-api/meme_generator/__init__.py
DELETED
@@ -1,21 +0,0 @@
-from pathlib import Path
-
-from meme_generator.config import meme_config as config
-from meme_generator.manager import add_meme as add_meme
-from meme_generator.manager import get_meme as get_meme
-from meme_generator.manager import get_meme_keys as get_meme_keys
-from meme_generator.manager import get_memes as get_memes
-from meme_generator.manager import load_meme as load_meme
-from meme_generator.manager import load_memes as load_memes
-from meme_generator.meme import Meme as Meme
-from meme_generator.meme import MemeArgsModel as MemeArgsModel
-from meme_generator.meme import MemeArgsParser as MemeArgsParser
-from meme_generator.meme import MemeArgsType as MemeArgsType
-from meme_generator.meme import MemeParamsType as MemeParamsType
-from meme_generator.version import __version__ as __version__
-
-if config.meme.load_builtin_memes:
-    for path in (Path(__file__).parent / "memes").iterdir():
-        load_meme(f"meme_generator.memes.{path.name}")
-for meme_dir in config.meme.meme_dirs:
-    load_memes(meme_dir)
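The import-time loop above is the whole plugin mechanism: importing the package registers every built-in meme. A minimal sketch of verifying that:

```python
# Hypothetical check that the import-time loop registered the built-in memes.
import meme_generator

keys = meme_generator.get_meme_keys()
print(len(keys), "dinosaur" in keys)
```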
spaces/CikeyQI/meme-api/meme_generator/memes/dinosaur/__init__.py
DELETED
@@ -1,22 +0,0 @@
-from pathlib import Path
-from typing import List
-
-from pil_utils import BuildImage
-
-from meme_generator import add_meme
-from meme_generator.utils import make_jpg_or_gif
-
-img_dir = Path(__file__).parent / "images"
-
-
-def dinosaur(images: List[BuildImage], texts, args):
-    frame = BuildImage.open(img_dir / "0.png")
-
-    def make(img: BuildImage) -> BuildImage:
-        img = img.convert("RGBA").resize((680, 578), keep_ratio=True)
-        return frame.copy().paste(img, (294, 369), below=True)
-
-    return make_jpg_or_gif(images[0], make)
-
-
-add_meme("dinosaur", dinosaur, min_images=1, max_images=1, keywords=["恐龙", "小恐龙"])
spaces/CofAI/chat.b4/g4f/Provider/Providers/Better.py
DELETED
@@ -1,56 +0,0 @@
-import os
-import json
-import requests
-from typing import Dict, get_type_hints
-
-url = 'https://openai-proxy-api.vercel.app/v1/'
-model = {
-    'gpt-3.5-turbo',
-    'gpt-3.5-turbo-0613'
-    'gpt-3.5-turbo-16k',
-    'gpt-3.5-turbo-16k-0613',
-    'gpt-4',
-}
-
-supports_stream = True
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    headers = {
-        'Content-Type': 'application/json',
-        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58',
-        'Referer': 'https://chat.ylokh.xyz/',
-        'Origin': 'https://chat.ylokh.xyz',
-        'Connection': 'keep-alive',
-    }
-
-    json_data = {
-        'messages': messages,
-        'temperature': 1.0,
-        'model': model,
-        'stream': stream,
-    }
-
-    response = requests.post(
-        'https://openai-proxy-api.vercel.app/v1/chat/completions', headers=headers, json=json_data, stream=True
-    )
-
-    for token in response.iter_lines():
-        decoded = token.decode('utf-8')
-        if decoded.startswith('data: '):
-            data_str = decoded.replace('data: ', '')
-            data = json.loads(data_str)
-            if 'choices' in data and 'delta' in data['choices'][0]:
-                delta = data['choices'][0]['delta']
-                content = delta.get('content', '')
-                finish_reason = delta.get('finish_reason', '')
-
-                if finish_reason == 'stop':
-                    break
-                if content:
-                    yield content
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + '(%s)' % ', '.join(
-    [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
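Worth noting: the `model` set literal above is missing a comma after 'gpt-3.5-turbo-0613', so Python's implicit string concatenation silently fuses two names into one. A minimal demonstration:

```python
# Implicit concatenation: the missing comma fuses two adjacent string literals.
model = {
    'gpt-3.5-turbo',
    'gpt-3.5-turbo-0613'
    'gpt-3.5-turbo-16k',   # becomes 'gpt-3.5-turbo-0613gpt-3.5-turbo-16k'
}
assert 'gpt-3.5-turbo-0613gpt-3.5-turbo-16k' in model
assert 'gpt-3.5-turbo-0613' not in model
```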
spaces/CofAI/chat.b4/g4f/Provider/Providers/You.py
DELETED
@@ -1,24 +0,0 @@
-import os
-import json
-import time
-import subprocess
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://you.com'
-model = 'gpt-3.5-turbo'
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
-    path = os.path.dirname(os.path.realpath(__file__))
-    config = json.dumps({
-        'messages': messages}, separators=(',', ':'))
-
-    cmd = ['python3', f'{path}/helpers/you.py', config]
-
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
-    for line in iter(p.stdout.readline, b''):
-        yield line.decode('utf-8') #[:-1]
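This provider streams by spawning a helper script and yielding its stdout line by line. A standalone sketch of that pattern (the inline child program stands in for helpers/you.py):

```python
# Minimal sketch of line-streaming a child process, as You.py does.
import subprocess

p = subprocess.Popen(
    ['python3', '-c', "print('hello'); print('world')"],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
for line in iter(p.stdout.readline, b''):
    print(line.decode('utf-8'), end='')  # each line is yielded as soon as it is flushed
p.wait()
```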